//! Command line executable.
extern crate pico;
extern crate clap;
extern crate hex;
use std::str::FromStr;
use std::path::Path;
use std::io::stdout;
use pico::{HeaderFormat, major, minor};
use clap::{Arg, App};
use pico::file;
use hex::FromHex;
/// Executable description.
static DESCRIPTION: &str =
"Encode a file as Pico, decode a Pico-encoded file, or dump the header \
from a Pico-encoded file.";
static LONG_DESCRIPTION: &str =
"Input files are encoded by default. If encoding, a .pico extension \
is added to the file. If decoding, then the input must be Pico-encoded \
files, and a .raw extension is added by default. If dumping the header, \
the input files must be Pico-encoded files, and the header is sent to \
standard output in the specified format.
The extension used can be overridden by --extension, which should include \
the dot. Any provided suffix (by default there is none) is added to the \
file's base name.
The header kinds can be JSON, YAML, DICT (Python), or XML.
Keys must be specified as a list of hexadecimal digits (no spaces). If \
no key is specified for encoding, a random key is generated.";
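// Illustrative invocations (the binary name and file paths are hypothetical;
// the flags match those defined in `main` below):
//
//   pico --encode --key 0123456789ABCDEF data.bin
//   pico --decode data.bin.pico
//   pico --header JSON data.bin.pico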
/// Entry point when run from the command line.
fn main() {
// Add some information to the end of the help.
let after = format!(
"{}\n\nPico Encoding Version: {}.{}\nSee: {}",
LONG_DESCRIPTION, major(), minor(), env!("CARGO_PKG_HOMEPAGE")
);
// Parse command line arguments.
let app_matches = App::new("Pico Rust Library")
.version(env!("CARGO_PKG_VERSION"))
.author("The Mons Pico Project")
.about(DESCRIPTION)
.after_help(after.as_str())
.arg(Arg::with_name("verbose")
.short("v")
.long("verbose")
.help("Increase verbosity.")
.takes_value(false))
.arg(Arg::with_name("debug")
.long("debug")
.help("Enable debugging.")
.takes_value(false))
.arg(Arg::with_name("decode")
.conflicts_with("encode")
.conflicts_with("header")
.short("d")
.long("decode")
.help("Decode files.")
.takes_value(false))
.arg(Arg::with_name("encode")
.conflicts_with("decode")
.conflicts_with("header")
.short("e")
.long("encode")
.help("Encode files.")
.takes_value(false))
.arg(Arg::with_name("extension")
.long("extension")
.help("Set output file extension.")
.takes_value(true))
.arg(Arg::with_name("header")
.conflicts_with("encode")
.conflicts_with("decode")
.possible_values(&["DICT", "JSON", "YAML", "XML"])
.short("H")
.long("header")
.value_name("format")
.help("Dump header information.")
.takes_value(true))
.arg(Arg::with_name("suffix")
.short("s")
.long("suffix")
.default_value("")
.help("Suffix to add to output files.")
.takes_value(true))
.arg(Arg::with_name("key")
.short("k")
.long("key")
.help("Specify key for encoding.")
.takes_value(true))
.arg(Arg::with_name("files")
.help("File names to process.")
.multiple(true)
.required(true)
.takes_value(true))
.get_matches();
// Figure out correct operation. This unwrap should not fail since
// the files are required.
let filelist = app_matches.values_of("files").unwrap();
enum Operation {
    Header, Encode, Decode,
}
let mut op = Operation::Encode;
if app_matches.is_present("header") { op = Operation::Header; }
if app_matches.is_present("decode") { op = Operation::Decode; }
let header_format = match app_matches.value_of("header") {
None => HeaderFormat::DICT,
// This unwrap should not fail, since the format names are checked
// when parsing the command line.
Some(name) => HeaderFormat::from_str(name).unwrap(),
};
let extension = match app_matches.value_of("extension") {
None => {
match op {
Operation::Decode => ".raw",
_ => ".pico",
}
},
Some(ext) => ext,
};
// This unwrap should never fail since suffix has a default value.
let suffix = app_matches.value_of("suffix").unwrap();
// Perform the operation for each specified file.
for file in filelist {
// Check the file.
let filepath = Path::new(&file);
if filepath.is_dir() {
eprintln!("ERROR: Argument {:?} is a folder.", file);
return;
}
if !filepath.exists() {
eprintln!("ERROR: Argument {:?} is not found.", file);
return;
}
let basename = match filepath.file_stem() {
None => {
eprintln!("ERROR: Argument {:?} is not a file.", file);
return;
},
Some(value) => value,
}.to_string_lossy().into_owned();
let oldname: String = filepath.to_string_lossy().into_owned();
// Perform the correct operation.
match op {
Operation::Header => {
println!("Pico Header as {:?} for: {:?}", header_format, filepath);
match file::dump_header(&oldname, stdout(), &header_format) {
Ok(()) => (),
Err(err) => eprintln!("ERROR: {}", err),
};
},
Operation::Encode => {
// See if the user specified a key; if not, generate one.
let key = match app_matches.value_of("key") {
None => pico::gen_random_key(16),
Some(hex) => {
let hex = hex.to_uppercase().into_bytes();
let hexlen = hex.len();
if hexlen % 2 != 0 {
// I think this is more helpful than the default given
// by the hex package.
eprintln!("ERROR: Key must be an even number of hex digits.");
return;
}
if hexlen == 0 {
// The hex package permits an empty string, so we have
// to trap this here.
eprintln!("ERROR: Key cannot be empty.");
return;
}
match Vec::<u8>::from_hex(hex) {
Ok(value) => value,
Err(err) => {
eprintln!("ERROR: {}", err);
return;
}
}
}
};
let newname = basename + suffix + extension;
println!("Encoding {:?} -> {:?}", oldname, newname);
match file::encode(&oldname, &newname, key, vec![], 0) {
Ok(()) => (),
Err(err) => eprintln!("ERROR: {}", err),
};
},
Operation::Decode => {
let newname = basename + suffix + extension;
println!("Decoding {:?} -> {:?}", oldname, newname);
match file::decode(&oldname, &newname) {
Ok(()) => (),
Err(err) => eprintln!("ERROR: {}", err),
};
},
}
}
}

/// Add middleware to service
///
/// ## Overview
///
/// Middleware decorates a service, adding additional functionality. It is a
/// concept common to most web frameworks.
///
/// Tower Web uses the Tower stack for middleware (hence the name). This example
/// decorates the application with the LogMiddleware. This middleware logs
/// information for each request.
///
/// ## Usage
///
/// Run the example:
///
/// RUST_LOG="hello_world=info" cargo run --example middleware
///
/// Then send a request:
///
/// curl -v http://localhost:8080/
extern crate env_logger;
extern crate flate2;
#[macro_use]
extern crate tower_web;
extern crate tokio;
use tower_web::ServiceBuilder;
use tower_web::middleware::deflate::DeflateMiddleware;
use tower_web::middleware::log::LogMiddleware;
use flate2::Compression;
#[derive(Clone, Debug)]
pub struct HelloWorld;
impl_web! {
impl HelloWorld {
#[get("/")]
fn hello_world(&self) -> Result<&'static str, ()> {
Ok("hello world")
}
}
}
pub fn main() {
let _ = env_logger::try_init();
let addr = "127.0.0.1:8080".parse().expect("Invalid address");
println!("Listening on http://{}", addr);
ServiceBuilder::new()
.resource(HelloWorld)
// Add middleware, in this case access logging
.middleware(LogMiddleware::new("hello_world::web"))
.middleware(DeflateMiddleware::new(Compression::best()))
// We run the service
.run(&addr)
.unwrap();
}

use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug, Default)]
#[serde(rename_all = "PascalCase")]
pub struct SmsConfigurationType {
external_id: Option<String>,
sns_caller_arn: Option<String>,
}
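// A quick note on the wire format implied by `rename_all = "PascalCase"`
// above (assuming a JSON serializer such as serde_json; values are made up):
//
//   {"ExternalId": "abc", "SnsCallerArn": "arn:aws:sns:..."}
//
// deserializes into `SmsConfigurationType` with both fields set to `Some(...)`;
// keys absent from the input simply become `None`.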

use crate::int_code::Program;
pub fn part1(input_string: &str) -> String {
part1_patch(input_string, true)
}
pub fn part1_patch(input_string: &str, patch: bool) -> String {
let mut program = Program::parse(input_string);
if patch {
// To do this, before running the program, replace position 1 with the value 12 and replace position 2 with the value 2.
program.write_memory(1, 12);
program.write_memory(2, 2);
}
program.run_for_register0().to_string()
}
pub fn part2(input_string: &str) -> String {
let initial_program = Program::parse(input_string);
for noun in 0..=99 {
for verb in 0..=99 {
let mut program = initial_program.clone();
program.write_memory(1, noun);
program.write_memory(2, verb);
if program.run_for_register0() == 19_690_720 {
return (100 * noun + verb).to_string();
}
}
}
"ERROR".to_string()
}
#[test]
pub fn tests_part1() {
assert_eq!("3500", part1_patch("1,9,10,3,2,3,11,0,99,30,40,50", false));
assert_eq!("2", part1_patch("1,0,0,0,99", false));
assert_eq!("2", part1_patch("2,3,0,3,99", false));
assert_eq!("2", part1_patch("2,4,4,5,99,0", false));
assert_eq!("30", part1_patch("1,1,1,4,99,5,6,0,99", false));
assert_eq!("4570637", part1(include_str!("day02_input.txt")));
}
#[test]
fn tests_part2() {
assert_eq!("5485", part2(include_str!("day02_input.txt")));
}

// Copyright 2018 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate chrono;
extern crate exonum;
#[macro_use]
extern crate exonum_testkit;
extern crate exonum_time;
#[macro_use]
extern crate pretty_assertions;
use chrono::{DateTime, Duration, TimeZone, Utc};
use exonum::{
blockchain::{Schema, TransactionErrorType, TransactionResult},
crypto::{gen_keypair, PublicKey}, helpers::{Height, ValidatorId},
messages::{RawTransaction, Signed}, storage::Snapshot,
};
use exonum_testkit::{ApiKind, TestKitApi, TestKitBuilder, TestNode};
use exonum_time::{
api::ValidatorTime, schema::TimeSchema, time_provider::MockTimeProvider, transactions::Error,
transactions::TxTime, TimeService,
};
use std::{collections::HashMap, iter::FromIterator};
fn assert_storage_times_eq<T: AsRef<Snapshot>>(
snapshot: T,
validators: &[TestNode],
expected_current_time: Option<DateTime<Utc>>,
expected_validators_times: &[Option<DateTime<Utc>>],
) {
let schema = TimeSchema::new(snapshot);
assert_eq!(schema.time().get(), expected_current_time);
let validators_times = schema.validators_times();
for (i, validator) in validators.iter().enumerate() {
let public_key = &validator.public_keys().service_key;
assert_eq!(
validators_times.get(public_key),
expected_validators_times[i]
);
}
}
fn assert_transaction_result<S: AsRef<Snapshot>>(
snapshot: S,
transaction: &Signed<RawTransaction>,
expected_code: u8,
) -> Option<String> {
let result = Schema::new(snapshot)
.transaction_results()
.get(&transaction.hash());
match result {
Some(TransactionResult(Err(e))) => {
assert_eq!(e.error_type(), TransactionErrorType::Code(expected_code));
e.description().map(str::to_string)
}
_ => {
panic!("Expected Err(), found None or Ok()");
}
}
}
#[test]
fn test_exonum_time_service_with_3_validators() {
let mut testkit = TestKitBuilder::validator()
.with_validators(3)
.with_service(TimeService::new())
.create();
let validators = testkit.network().validators().to_vec();
// Validators time, that is saved in storage, look like this:
// number | 0 | 1 | 2 |
// time | None | None | None |
//
// Consolidated time is None.
assert_storage_times_eq(testkit.snapshot(), &validators, None, &[None, None, None]);
// Add first transaction `tx0` from first validator with time `time0`.
// After that validators time look like this:
// number | 0 | 1 | 2 |
// time | `time0` | None | None |
//
// Consolidated time will have the value `time0`.
let time0 = Utc::now();
let tx0 = {
let (pub_key, sec_key) = validators[0].service_keypair();
TxTime::sign(time0, pub_key, sec_key)
};
testkit.create_block_with_transactions(txvec![tx0]);
assert_storage_times_eq(
testkit.snapshot(),
&validators,
Some(time0),
&[Some(time0), None, None],
);
// Add second transaction `tx1` from second validator with time `time1` = `time0` + 10 sec.
// After that validators time look like this:
// number | 0 | 1 | 2 |
// time | `time0` | `time1` | None |
//
// In sorted order: `time1` >= `time0`.
// Consolidated time will have the value `time1`.
let time1 = time0 + Duration::seconds(10);
let tx1 = {
let (pub_key, sec_key) = validators[1].service_keypair();
TxTime::sign(time1, pub_key, sec_key)
};
testkit.create_block_with_transactions(txvec![tx1]);
assert_storage_times_eq(
testkit.snapshot(),
&validators,
Some(time1),
&[Some(time0), Some(time1), None],
);
}
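// A minimal sketch (not part of the service API; the function name is ours)
// of how the consolidated time asserted above appears to be derived: sort
// the reported validator times in decreasing order and take the entry at
// index `f = (n - 1) / 3` (the maximum number of Byzantine validators),
// provided at least `2f + 1` validators have reported.
#[allow(dead_code)]
fn consolidated_time_sketch(
    mut times: Vec<DateTime<Utc>>,
    validator_count: usize,
) -> Option<DateTime<Utc>> {
    let max_byzantine = (validator_count - 1) / 3;
    if times.len() < 2 * max_byzantine + 1 {
        // Too few reports to outvote a Byzantine minority.
        return None;
    }
    times.sort_by(|a, b| b.cmp(a)); // decreasing order
    times.get(max_byzantine).cloned()
}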
#[test]
fn test_exonum_time_service_with_4_validators() {
let mut testkit = TestKitBuilder::validator()
.with_validators(4)
.with_service(TimeService::new())
.create();
let validators = testkit.network().validators().to_vec();
// Validators time, that is saved in storage, look like this:
// number | 0 | 1 | 2 | 3 |
// time | None | None | None | None |
//
// max_byzantine_nodes = (4 - 1) / 3 = 1.
//
// Consolidated time is None.
assert_storage_times_eq(
testkit.snapshot(),
&validators,
None,
&[None, None, None, None],
);
// Add first transaction `tx0` from first validator with time `time0`.
// After that validators time look like this:
// number | 0 | 1 | 2 | 3 |
// time | `time0` | None | None | None |
//
// Consolidated time doesn't change.
let time0 = Utc::now();
let tx0 = {
let (pub_key, sec_key) = validators[0].service_keypair();
TxTime::sign(time0, pub_key, sec_key)
};
testkit.create_block_with_transactions(txvec![tx0]);
assert_storage_times_eq(
testkit.snapshot(),
&validators,
None,
&[Some(time0), None, None, None],
);
// Add second transaction `tx1` from second validator with time `time1` = `time0` + 10 sec.
// After that validators time look like this:
// number | 0 | 1 | 2 | 3 |
// time | `time0` | `time1` | None | None |
//
// In sorted order: `time1` >= `time0`.
// Consolidated time doesn't change.
let time1 = time0 + Duration::seconds(10);
let tx1 = {
let (pub_key, sec_key) = validators[1].service_keypair();
TxTime::sign(time1, pub_key, sec_key)
};
testkit.create_block_with_transactions(txvec![tx1]);
assert_storage_times_eq(
testkit.snapshot(),
&validators,
None,
&[Some(time0), Some(time1), None, None],
);
// Add third transaction `tx2` from third validator with time `time2` = `time1` + 10 sec.
// After that validators time look like this:
// number | 0 | 1 | 2 | 3 |
// time | `time0` | `time1` | `time2` | None |
//
// In sorted order: `time2` >= `time1` >= `time0`.
// Consolidated time will have the value `time1`.
let time2 = time1 + Duration::seconds(10);
let tx2 = {
let (pub_key, sec_key) = validators[2].service_keypair();
TxTime::sign(time2, pub_key, sec_key)
};
testkit.create_block_with_transactions(txvec![tx2]);
assert_storage_times_eq(
testkit.snapshot(),
&validators,
Some(time1),
&[Some(time0), Some(time1), Some(time2), None],
);
// Add fourth transaction `tx3` from fourth validator with time `time3` = `time2` + 10 sec.
// After that validators time look like this:
// number | 0 | 1 | 2 | 3 |
// time | `time0` | `time1` | `time2` | `time3` |
//
// In sorted order: `time3` >= `time2` >= `time1` >= `time0`.
// Consolidated time will have the value `time2`.
let time3 = time2 + Duration::seconds(10);
let tx3 = {
let (pub_key, sec_key) = validators[3].service_keypair();
TxTime::sign(time3, pub_key, sec_key)
};
testkit.create_block_with_transactions(txvec![tx3]);
assert_storage_times_eq(
testkit.snapshot(),
&validators,
Some(time2),
&[Some(time0), Some(time1), Some(time2), Some(time3)],
);
}
#[test]
fn test_exonum_time_service_with_7_validators() {
let mut testkit = TestKitBuilder::validator()
.with_validators(7)
.with_service(TimeService::new())
.create();
let validators = testkit.network().validators().to_vec();
let mut validators_times = vec![None; 7];
assert_storage_times_eq(testkit.snapshot(), &validators, None, &validators_times);
let time = Utc::now();
let times = (0..7)
.map(|x| time + Duration::seconds(x * 10))
.collect::<Vec<_>>();
let expected_storage_times = vec![
None,
None,
None,
None,
Some(times[2]),
Some(times[3]),
Some(times[4]),
];
for (i, validator) in validators.iter().enumerate() {
let tx = {
let (pub_key, sec_key) = validator.service_keypair();
TxTime::sign(times[i], pub_key, sec_key)
};
testkit.create_block_with_transactions(txvec![tx.clone()]);
assert_eq!(
Schema::new(testkit.snapshot())
.transaction_results()
.get(&tx.hash()),
Some(TransactionResult(Ok(())))
);
validators_times[i] = Some(times[i]);
assert_storage_times_eq(
testkit.snapshot(),
&validators,
expected_storage_times[i],
&validators_times,
);
}
}
#[test]
fn test_mock_provider() {
let mock_provider = MockTimeProvider::default();
let mut testkit = TestKitBuilder::validator()
.with_service(TimeService::with_provider(mock_provider.clone()))
.create();
let validators = testkit.network().validators().to_vec();
let assert_storage_times = |snapshot: Box<Snapshot>| {
assert_storage_times_eq(
snapshot,
&validators,
Some(mock_provider.time()),
&[Some(mock_provider.time())],
);
};
mock_provider.add_time(Duration::seconds(10));
assert_eq!(Utc.timestamp(10, 0), mock_provider.time());
testkit.create_blocks_until(Height(2));
assert_storage_times(testkit.snapshot());
mock_provider.set_time(Utc.timestamp(50, 0));
assert_eq!(Utc.timestamp(50, 0), mock_provider.time());
testkit.create_blocks_until(Height(4));
assert_storage_times(testkit.snapshot());
mock_provider.add_time(Duration::seconds(20));
assert_eq!(Utc.timestamp(70, 0), mock_provider.time());
testkit.create_blocks_until(Height(6));
assert_storage_times(testkit.snapshot());
mock_provider.set_time(Utc.timestamp(30, 0));
assert_eq!(Utc.timestamp(30, 0), mock_provider.time());
testkit.create_blocks_until(Height(8));
assert_storage_times_eq(
testkit.snapshot(),
&validators,
Some(Utc.timestamp(70, 0)),
&[Some(Utc.timestamp(70, 0))],
);
}
#[test]
fn test_selected_time_less_than_time_in_storage() {
let mut testkit = TestKitBuilder::validator()
.with_validators(1)
.with_service(TimeService::new())
.create();
let validators = testkit.network().validators().to_vec();
let (pub_key_0, _) = validators[0].service_keypair();
let cfg_change_height = Height(5);
let new_cfg = {
let mut cfg = testkit.configuration_change_proposal();
cfg.set_validators(vec![TestNode::new_validator(ValidatorId(0))]);
cfg.set_actual_from(cfg_change_height);
cfg
};
testkit.commit_configuration_change(new_cfg);
testkit.create_blocks_until(cfg_change_height.previous());
let validators = testkit.network().validators().to_vec();
let (pub_key_1, sec_key_1) = validators[0].service_keypair();
let snapshot = testkit.snapshot();
let schema = TimeSchema::new(snapshot);
assert!(schema.time().get().is_some());
assert!(schema.validators_times().get(pub_key_0).is_some());
assert!(schema.validators_times().get(pub_key_1).is_none());
assert_eq!(
schema.time().get(),
schema.validators_times().get(pub_key_0)
);
if let Some(time_in_storage) = schema.time().get() {
let time_tx = time_in_storage - Duration::seconds(10);
let tx = { TxTime::sign(time_tx, pub_key_1, sec_key_1) };
testkit.create_block_with_transactions(txvec![tx.clone()]);
assert_eq!(
Schema::new(testkit.snapshot())
.transaction_results()
.get(&tx.hash()),
Some(TransactionResult(Ok(())))
);
}
let snapshot = testkit.snapshot();
let schema = TimeSchema::new(snapshot);
assert!(schema.time().get().is_some());
assert!(schema.validators_times().get(pub_key_0).is_some());
assert!(schema.validators_times().get(pub_key_1).is_some());
assert_eq!(
schema.time().get(),
schema.validators_times().get(pub_key_0)
);
}
#[test]
fn test_creating_transaction_is_not_validator() {
let mut testkit = TestKitBuilder::validator()
.with_validators(1)
.with_service(TimeService::new())
.create();
let (pub_key, sec_key) = gen_keypair();
let tx = TxTime::sign(Utc::now(), &pub_key, &sec_key);
testkit.create_block_with_transactions(txvec![tx.clone()]);
assert_transaction_result(testkit.snapshot(), &tx, Error::UnknownSender as u8);
let snapshot = testkit.snapshot();
let schema = TimeSchema::new(snapshot);
assert!(schema.time().get().is_none());
assert!(schema.validators_times().get(&pub_key).is_none());
}
#[test]
fn test_transaction_time_less_than_validator_time_in_storage() {
let mut testkit = TestKitBuilder::validator()
.with_validators(1)
.with_service(TimeService::new())
.create();
let validator = &testkit.network().validators().to_vec()[0];
let (pub_key, sec_key) = validator.service_keypair();
let time0 = Utc::now();
let tx0 = TxTime::sign(time0, pub_key, sec_key);
testkit.create_block_with_transactions(txvec![tx0.clone()]);
assert_eq!(
Schema::new(testkit.snapshot())
.transaction_results()
.get(&tx0.hash()),
Some(TransactionResult(Ok(())))
);
let snapshot = testkit.snapshot();
let schema = TimeSchema::new(snapshot);
assert_eq!(schema.time().get(), Some(time0));
assert_eq!(schema.validators_times().get(pub_key), Some(time0));
let time1 = time0 - Duration::seconds(10);
let tx1 = TxTime::sign(time1, pub_key, sec_key);
testkit.create_block_with_transactions(txvec![tx1.clone()]);
assert_transaction_result(
testkit.snapshot(),
&tx1,
Error::ValidatorTimeIsGreater as u8,
);
let snapshot = testkit.snapshot();
let schema = TimeSchema::new(snapshot);
assert_eq!(schema.time().get(), Some(time0));
assert_eq!(schema.validators_times().get(pub_key), Some(time0));
}
fn get_current_time(api: &mut TestKitApi) -> Option<DateTime<Utc>> {
api.public(ApiKind::Service("exonum_time"))
.get("v1/current_time")
.unwrap()
}
fn get_current_validators_times(api: &mut TestKitApi) -> Vec<ValidatorTime> {
api.private(ApiKind::Service("exonum_time"))
.get("v1/validators_times")
.unwrap()
}
fn get_all_validators_times(api: &mut TestKitApi) -> Vec<ValidatorTime> {
api.private(ApiKind::Service("exonum_time"))
.get("v1/validators_times/all")
.unwrap()
}
fn assert_current_time_eq(api: &mut TestKitApi, expected_time: Option<DateTime<Utc>>) {
let current_time = get_current_time(api);
assert_eq!(expected_time, current_time);
}
fn assert_current_validators_times_eq(
api: &mut TestKitApi,
expected_times: &HashMap<PublicKey, Option<DateTime<Utc>>>,
) {
let validators_times = HashMap::from_iter(
get_current_validators_times(api)
.iter()
.map(|validator| (validator.public_key, validator.time)),
);
assert_eq!(*expected_times, validators_times);
}
fn assert_all_validators_times_eq(
api: &mut TestKitApi,
expected_validators_times: &HashMap<PublicKey, Option<DateTime<Utc>>>,
) {
let validators_times = HashMap::from_iter(
get_all_validators_times(api)
.iter()
.map(|validator| (validator.public_key, validator.time)),
);
assert_eq!(*expected_validators_times, validators_times);
}
#[test]
fn test_endpoint_api() {
let mut testkit = TestKitBuilder::validator()
.with_validators(3)
.with_service(TimeService::new())
.create();
let mut api = testkit.api();
let validators = testkit.network().validators().to_vec();
let mut current_validators_times: HashMap<PublicKey, Option<DateTime<Utc>>> =
HashMap::from_iter(
validators
.iter()
.map(|validator| (*validator.service_keypair().0, None)),
);
let mut all_validators_times = HashMap::new();
assert_current_time_eq(&mut api, None);
assert_current_validators_times_eq(&mut api, &current_validators_times);
assert_all_validators_times_eq(&mut api, &all_validators_times);
let time0 = Utc::now();
let (pub_key, sec_key) = validators[0].service_keypair();
testkit.create_block_with_transactions(txvec![TxTime::sign(time0, pub_key, sec_key)]);
current_validators_times.insert(*pub_key, Some(time0));
all_validators_times.insert(*pub_key, Some(time0));
assert_current_time_eq(&mut api, Some(time0));
assert_current_validators_times_eq(&mut api, &current_validators_times);
assert_all_validators_times_eq(&mut api, &all_validators_times);
let time1 = time0 + Duration::seconds(10);
let (pub_key, sec_key) = validators[1].service_keypair();
testkit.create_block_with_transactions(txvec![TxTime::sign(time1, pub_key, sec_key)]);
current_validators_times.insert(*pub_key, Some(time1));
all_validators_times.insert(*pub_key, Some(time1));
assert_current_time_eq(&mut api, Some(time1));
assert_current_validators_times_eq(&mut api, &current_validators_times);
assert_all_validators_times_eq(&mut api, &all_validators_times);
let time2 = time1 + Duration::seconds(10);
let (pub_key, sec_key) = validators[2].service_keypair();
testkit.create_block_with_transactions(txvec![TxTime::sign(time2, pub_key, sec_key)]);
current_validators_times.insert(*pub_key, Some(time2));
all_validators_times.insert(*pub_key, Some(time2));
assert_current_time_eq(&mut api, Some(time2));
assert_current_validators_times_eq(&mut api, &current_validators_times);
assert_all_validators_times_eq(&mut api, &all_validators_times);
let public_key_0 = validators[0].service_keypair().0;
let cfg_change_height = Height(10);
let new_cfg = {
let mut cfg = testkit.configuration_change_proposal();
cfg.set_validators(vec![
TestNode::new_validator(ValidatorId(3)),
validators[1].clone(),
validators[2].clone(),
]);
cfg.set_actual_from(cfg_change_height);
cfg
};
testkit.commit_configuration_change(new_cfg);
testkit.create_blocks_until(cfg_change_height.previous());
current_validators_times.remove(public_key_0);
let validators = testkit.network().validators().to_vec();
current_validators_times.insert(*validators[0].service_keypair().0, None);
let snapshot = testkit.snapshot();
let schema = TimeSchema::new(&snapshot);
if let Some(time) = schema.validators_times().get(public_key_0) {
all_validators_times.insert(*public_key_0, Some(time));
}
assert_current_time_eq(&mut api, Some(time2));
assert_current_validators_times_eq(&mut api, &current_validators_times);
assert_all_validators_times_eq(&mut api, &all_validators_times);
let time3 = time2 + Duration::seconds(10);
let (pub_key, sec_key) = validators[0].service_keypair();
testkit.create_block_with_transactions(txvec![TxTime::sign(time3, pub_key, sec_key)]);
current_validators_times.insert(*pub_key, Some(time3));
all_validators_times.insert(*pub_key, Some(time3));
assert_current_time_eq(&mut api, Some(time3));
assert_current_validators_times_eq(&mut api, &current_validators_times);
assert_all_validators_times_eq(&mut api, &all_validators_times);
}

#![allow(dead_code)]
#![allow(unused_imports)]
use crate::device::{RenderDevice, RenderDeviceId, RenderDeviceInfo};
use crate::error::{Error, Result};
use failure::Fail;
use std::fmt;
use std::sync::{Arc, RwLock};
bitflags! {
pub struct RenderDebugFlags: u32 {
/// No debugger support.
const NONE = 0x0;
/// Enable RenderDoc integration.
const RENDER_DOC = 0x1;
/// Enable PIX integration.
const PIX = 0x2;
/// Enable CPU validation layer(s)
const CPU_VALIDATION = 0x4;
/// Enable GPU validation layer(s)
const GPU_VALIDATION = 0x8;
/// Enable post crash analysis layer
const POST_CRASH_ANALYSIS = 0x10;
}
}
/*pub enum RenderBackendApi {
Dx12,
Vulkan,
Metal,
Mock,
Proxy(String),
}*/
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RenderBackendSettings {
/// API to use (i.e. "Dx12", "Vulkan", etc)
pub api: String, //RenderBackendApi,
/// Address to use (i.e. when routing through proxy)
/// i.e. x.x.x.x:50080
pub address: Option<String>,
//Handle deviceWindow = nullptr;
pub debug_flags: RenderDebugFlags,
}
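// A minimal usage sketch (field values are illustrative): the bitflags above
// combine with `|`, e.g. to enable both validation layers at once.
#[allow(dead_code)]
fn example_backend_settings() -> RenderBackendSettings {
    RenderBackendSettings {
        api: "Vulkan".to_string(),
        address: None,
        debug_flags: RenderDebugFlags::CPU_VALIDATION | RenderDebugFlags::GPU_VALIDATION,
    }
}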
pub trait RenderBackend: fmt::Debug {
fn is_initialized(&self) -> bool;
fn enumerate_devices(
&mut self,
max_devices: u32,
mirror_count: u32,
software: bool,
) -> Result<Vec<RenderDeviceInfo>>;
fn create_device(&mut self, device_id: RenderDeviceId) -> Result<()>;
fn destroy_device(&mut self, device_id: RenderDeviceId) -> Result<()>;
fn get_device(
&self,
device_id: RenderDeviceId,
) -> Result<Arc<RwLock<Option<Box<dyn RenderDevice>>>>>;
fn begin_debug_capture(&self, name: &str) -> Result<()>;
fn finish_debug_capture(&self) -> Result<()>;
fn trigger_debug_capture(&self) -> Result<()>;
fn launch_debug_capture(&self, quit: bool) -> Result<()>;
}
pub struct RenderBackendRegistry {
pub settings: RenderBackendSettings,
pub backend: Arc<RwLock<Box<dyn RenderBackend>>>,
}
pub trait RenderBackendModule: fmt::Debug {
fn name(&self) -> &'static str;
fn api(&self) -> &'static str;
fn create(&self) -> Box<dyn RenderBackend>;
}

//! Sources of sound and various filters.
use std::time::Duration;
use Sample;
pub use self::amplify::Amplify;
pub use self::blt::BltFilter;
pub use self::buffered::Buffered;
pub use self::channel_volume::ChannelVolume;
pub use self::delay::Delay;
pub use self::done::Done;
pub use self::empty::Empty;
pub use self::fadein::FadeIn;
pub use self::from_factory::{from_factory, FromFactoryIter};
pub use self::from_iter::{from_iter, FromIter};
pub use self::mix::Mix;
pub use self::pausable::Pausable;
pub use self::periodic::PeriodicAccess;
pub use self::repeat::Repeat;
pub use self::samples_converter::SamplesConverter;
pub use self::sine::SineWave;
pub use self::spatial::Spatial;
pub use self::speed::Speed;
pub use self::stoppable::Stoppable;
pub use self::take::TakeDuration;
pub use self::uniform::UniformSourceIterator;
pub use self::zero::Zero;
mod amplify;
mod blt;
mod buffered;
mod channel_volume;
mod delay;
mod done;
mod empty;
mod fadein;
mod from_factory;
mod from_iter;
mod mix;
mod pausable;
mod periodic;
mod repeat;
mod samples_converter;
mod sine;
mod spatial;
mod speed;
mod stoppable;
mod take;
mod uniform;
mod zero;
/// A source of samples.
///
/// # A quick lesson about sounds
///
/// ## Sampling
///
/// A sound is a vibration that propagates through air and reaches your ears. This vibration can
/// be represented as an analog signal.
///
/// In order to store this signal in the computer's memory or on the disk, we perform what is
/// called *sampling*. This consists in choosing an interval of time (for example 20µs) and reading
/// the amplitude of the signal at each interval (for example, if the interval is 20µs we read the
/// amplitude every 20µs). By doing so we obtain a list of numerical values, each value being
/// called a *sample*.
///
/// Therefore a sound can be represented in memory by a frequency and a list of samples. The
/// frequency is expressed in hertz and corresponds to the number of samples that have been
/// read per second. For example if we read one sample every 20µs, the frequency would be
/// 50000 Hz. In reality, common values for the frequency are 44100, 48000 and 96000.
///
/// ## Channels
///
/// But a frequency and a list of values only represent one signal. When you listen to a sound,
/// your left and right ears don't receive exactly the same signal. In order to handle this,
/// we usually record not one but two different signals: one for the left ear and one for the right
/// ear. We say that such a sound has two *channels*.
///
/// Sometimes sounds even have five or six channels, each corresponding to a location around the
/// head of the listener.
///
/// The standard in audio manipulation is to *interleave* the multiple channels. In other words,
/// in a sound with two channels the list of samples contains the first sample of the first
/// channel, then the first sample of the second channel, then the second sample of the first
/// channel, then the second sample of the second channel, and so on. The same applies if you have
/// more than two channels. The rodio library only supports this schema.
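///
/// For example, a stereo sound whose left-channel samples are `[l0, l1, l2]`
/// and whose right-channel samples are `[r0, r1, r2]` is stored as the single
/// interleaved list `[l0, r0, l1, r1, l2, r2]`.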
///
/// Therefore in order to represent a sound in memory in fact we need three characteristics: the
/// frequency, the number of channels, and the list of samples.
///
/// ## The `Source` trait
///
/// A Rust object that represents a sound should implement the `Source` trait.
///
/// The three characteristics that describe a sound are provided through this trait:
///
/// - The number of channels can be retrieved with `channels`.
/// - The frequency can be retrieved with `sample_rate`.
/// - The list of values can be retrieved by iterating on the source. The `Source` trait requires
/// that the `Iterator` trait be implemented as well.
///
/// # Frames
///
/// The sample rate and number of channels of some sound sources can change on their own from time
/// to time.
///
/// > **Note**: As a basic example, if you play two audio files one after the other and treat the
/// > whole as a single source, then the channels and sample rate of that source may change at the
/// > transition between the two files.
///
/// However, for optimization purposes rodio supposes that the number of channels and the frequency
/// stay the same for long periods of time and avoids calling `channels()` and
/// `sample_rate` too frequently.
///
/// In order to properly handle this situation, the `current_frame_len()` method should return
/// the number of samples that remain in the iterator before the sample rate and number of
/// channels can potentially change.
///
pub trait Source: Iterator
where
Self::Item: Sample,
{
/// Returns the number of samples before the current frame ends. `None` means "infinite" or
/// "until the sound ends".
/// Should never return 0 unless there's no more data.
///
/// After the engine has finished reading the specified number of samples, it will check
/// whether the value of `channels()` and/or `sample_rate()` have changed.
fn current_frame_len(&self) -> Option<usize>;
/// Returns the number of channels. Channels are always interleaved.
fn channels(&self) -> u16;
/// Returns the rate at which the source should be played. In number of samples per second.
fn sample_rate(&self) -> u32;
/// Returns the total duration of this source, if known.
///
/// `None` indicates at the same time "infinite" or "unknown".
fn total_duration(&self) -> Option<Duration>;
/// Stores the source in a buffer in addition to returning it. This iterator can be cloned.
#[inline]
fn buffered(self) -> Buffered<Self>
where
Self: Sized,
{
buffered::buffered(self)
}
/// Mixes this source with another one.
#[inline]
fn mix<S>(self, other: S) -> Mix<Self, S>
where
Self: Sized,
S: Source,
S::Item: Sample,
{
mix::mix(self, other)
}
/// Repeats this source forever.
///
/// Note that this works by storing the data in a buffer, so the amount of memory used is
/// proportional to the size of the sound.
#[inline]
fn repeat_infinite(self) -> Repeat<Self>
where
Self: Sized,
{
repeat::repeat(self)
}
/// Takes a certain duration of this source and then stops.
#[inline]
fn take_duration(self, duration: Duration) -> TakeDuration<Self>
where
Self: Sized,
{
take::take_duration(self, duration)
}
/// Delays the sound by a certain duration.
///
/// The rate and channels of the silence will use the same format as the first frame of the
/// source.
#[inline]
fn delay(self, duration: Duration) -> Delay<Self>
where
Self: Sized,
{
delay::delay(self, duration)
}
/// Amplifies the sound by the given value.
#[inline]
fn amplify(self, value: f32) -> Amplify<Self>
where
Self: Sized,
{
amplify::amplify(self, value)
}
/// Fades in the sound.
#[inline]
fn fade_in(self, duration: Duration) -> FadeIn<Self>
where
Self: Sized,
{
fadein::fadein(self, duration)
}
/// Calls the `access` closure on `Self` every time `period` elapsed.
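///
/// A minimal sketch (marked `ignore` since it needs a concrete `source` in
/// scope; the closure just inspects the source each period):
///
/// ```ignore
/// use std::time::Duration;
///
/// let source = source.periodic_access(Duration::from_millis(500), |src| {
///     println!("channels: {}", src.channels());
/// });
/// ```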
#[inline]
fn periodic_access<F>(self, period: Duration, access: F) -> PeriodicAccess<Self, F>
where
Self: Sized,
F: FnMut(&mut Self),
{
periodic::periodic(self, period, access)
}
/// Changes the play speed of the sound. Does not adjust the samples, only the play speed.
#[inline]
fn speed(self, ratio: f32) -> Speed<Self>
where
Self: Sized,
{
speed::speed(self, ratio)
}
/// Adds a basic reverb effect.
///
/// This function requires the source to implement `Clone`. This can be done by using
/// `buffered()`.
///
/// # Example
///
/// ```ignore
/// use std::time::Duration;
///
/// let source = source.buffered().reverb(Duration::from_millis(100), 0.7);
/// ```
#[inline]
fn reverb(self, duration: Duration, amplitude: f32) -> Mix<Self, Delay<Amplify<Self>>>
where
Self: Sized + Clone,
{
let echo = self.clone().amplify(amplitude).delay(duration);
self.mix(echo)
}
/// Converts the samples of this source to another type.
#[inline]
fn convert_samples<D>(self) -> SamplesConverter<Self, D>
where
Self: Sized,
D: Sample,
{
SamplesConverter::new(self)
}
/// Makes the sound pausable.
// TODO: add example
#[inline]
fn pausable(self, initially_paused: bool) -> Pausable<Self>
where
Self: Sized,
{
pausable::pausable(self, initially_paused)
}
/// Makes the sound stoppable.
// TODO: add example
#[inline]
fn stoppable(self) -> Stoppable<Self>
where
Self: Sized,
{
stoppable::stoppable(self)
}
/// Applies a low-pass filter to the source.
/// **Warning**: Probably buggy.
#[inline]
fn low_pass(self, freq: u32) -> BltFilter<Self>
where
Self: Sized,
Self: Source<Item = f32>,
{
blt::low_pass(self, freq)
}
}
impl<S> Source for Box<Source<Item = S>>
where
S: Sample,
{
#[inline]
fn current_frame_len(&self) -> Option<usize> {
(**self).current_frame_len()
}
#[inline]
fn channels(&self) -> u16 {
(**self).channels()
}
#[inline]
fn sample_rate(&self) -> u32 {
(**self).sample_rate()
}
#[inline]
fn total_duration(&self) -> Option<Duration> {
(**self).total_duration()
}
}
impl<S> Source for Box<Source<Item = S> + Send>
where
S: Sample,
{
#[inline]
fn current_frame_len(&self) -> Option<usize> {
(**self).current_frame_len()
}
#[inline]
fn channels(&self) -> u16 {
(**self).channels()
}
#[inline]
fn sample_rate(&self) -> u32 {
(**self).sample_rate()
}
#[inline]
fn total_duration(&self) -> Option<Duration> {
(**self).total_duration()
}
}
impl<S> Source for Box<Source<Item = S> + Send + Sync>
where
S: Sample,
{
#[inline]
fn current_frame_len(&self) -> Option<usize> {
(**self).current_frame_len()
}
#[inline]
fn channels(&self) -> u16 {
(**self).channels()
}
#[inline]
fn sample_rate(&self) -> u32 {
(**self).sample_rate()
}
#[inline]
fn total_duration(&self) -> Option<Duration> {
(**self).total_duration()
}
}

// functions3.rs
// Make me compile! Execute `rustlings hint functions3` for hints :)
fn main() {
call_me(3);
}
fn call_me(num: i32) {
for i in 0..num {
println!("Ring! Call number {}", i + 1);
}
}

// errors2.rs
// Say we're writing a game where you can buy items with tokens. All items cost
// 5 tokens, and whenever you purchase items there is a processing fee of 1
// token. A player of the game will type in how many items they want to buy,
// and the `total_cost` function will calculate the total number of tokens.
// Since the player typed in the quantity, though, we get it as a string-- and
// they might have typed anything, not just numbers!
// Right now, this function isn't handling the error case at all (and isn't
// handling the success case properly either). What we want to do is:
// if we call the `parse` function on a string that is not a number, that
// function will return a `ParseIntError`, and in that case, we want to
// immediately return that error from our function and not try to multiply
// and add.
// There are at least two ways to implement this that are both correct-- but
// one is a lot shorter! Execute `rustlings hint errors2` for hints to both ways.
use std::num::ParseIntError;
pub fn total_cost(item_quantity: &str) -> Result<i32, ParseIntError> {
let processing_fee = 1;
let cost_per_item = 5;
// let qty = item_quantity.parse::<i32>();
// Solution 1, match statement
// let qty = match qty {
// Ok(quantity) => Ok(quantity * cost_per_item + processing_fee),
// Err(error) => Err(error)
// };
// qty
// Solution 2, ? operator
let qty = item_quantity.parse::<i32>()?;
Ok(qty * cost_per_item + processing_fee)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn item_quantity_is_a_valid_number() {
assert_eq!(total_cost("34"), Ok(171));
}
#[test]
fn item_quantity_is_an_invalid_number() {
assert_eq!(
total_cost("beep boop").unwrap_err().to_string(),
"invalid digit found in string"
);
}
}

// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use back::{link};
use llvm::{ValueRef, CallConv, Linkage, get_param};
use llvm;
use middle::weak_lang_items;
use middle::trans::base::push_ctxt;
use middle::trans::base;
use middle::trans::build::*;
use middle::trans::cabi;
use middle::trans::common::*;
use middle::trans::machine;
use middle::trans::type_::Type;
use middle::trans::type_of::*;
use middle::trans::type_of;
use middle::ty::FnSig;
use middle::ty;
use std::cmp;
use libc::c_uint;
use syntax::abi::{Cdecl, Aapcs, C, Win64, Abi};
use syntax::abi::{RustIntrinsic, Rust, RustCall, Stdcall, Fastcall, System};
use syntax::codemap::Span;
use syntax::parse::token::{InternedString, special_idents};
use syntax::parse::token;
use syntax::{ast};
use syntax::{attr, ast_map};
use util::ppaux::{Repr, UserString};
///////////////////////////////////////////////////////////////////////////
// Type definitions
struct ForeignTypes {
/// Rust signature of the function
fn_sig: ty::FnSig,
/// Adapter object for handling native ABI rules (trust me, you
/// don't want to know)
fn_ty: cabi::FnType,
/// LLVM types that will appear on the foreign function
llsig: LlvmSignature,
/// True if there is a return value (not bottom, not unit)
ret_def: bool,
}
struct LlvmSignature {
// LLVM versions of the types of this function's arguments.
llarg_tys: Vec<Type> ,
// LLVM version of the type that this function returns. Note that
// this *may not be* the declared return type of the foreign
// function, because the foreign function may opt to return via an
// out pointer.
llret_ty: Type,
}
///////////////////////////////////////////////////////////////////////////
// Calls to external functions
pub fn llvm_calling_convention(ccx: &CrateContext,
abi: Abi) -> Option<CallConv> {
let os = ccx.sess().targ_cfg.os;
let arch = ccx.sess().targ_cfg.arch;
abi.for_target(os, arch).map(|abi| {
match abi {
RustIntrinsic => {
// Intrinsics are emitted at the call site
ccx.sess().bug("asked to register intrinsic fn");
}
Rust => {
// FIXME(#3678) Implement linking to foreign fns with Rust ABI
ccx.sess().unimpl("foreign functions with Rust ABI");
}
RustCall => {
// FIXME(#3678) Implement linking to foreign fns with Rust ABI
ccx.sess().unimpl("foreign functions with RustCall ABI");
}
// It's the ABI's job to select this, not us.
System => ccx.sess().bug("system abi should be selected elsewhere"),
Stdcall => llvm::X86StdcallCallConv,
Fastcall => llvm::X86FastcallCallConv,
C => llvm::CCallConv,
Win64 => llvm::X86_64_Win64,
// These API constants ought to be more specific...
Cdecl => llvm::CCallConv,
Aapcs => llvm::CCallConv,
}
})
}
pub fn llvm_linkage_by_name(name: &str) -> Option<Linkage> {
// Use the names from src/llvm/docs/LangRef.rst here. Most types are only
// applicable to variable declarations and may not really make sense for
// Rust code in the first place but whitelist them anyway and trust that
// the user knows what s/he's doing. Who knows, unanticipated use cases
// may pop up in the future.
//
// ghost, dllimport, dllexport and linkonce_odr_autohide are not supported
// and don't have to be, LLVM treats them as no-ops.
match name {
"appending" => Some(llvm::AppendingLinkage),
"available_externally" => Some(llvm::AvailableExternallyLinkage),
"common" => Some(llvm::CommonLinkage),
"extern_weak" => Some(llvm::ExternalWeakLinkage),
"external" => Some(llvm::ExternalLinkage),
"internal" => Some(llvm::InternalLinkage),
"linkonce" => Some(llvm::LinkOnceAnyLinkage),
"linkonce_odr" => Some(llvm::LinkOnceODRLinkage),
"private" => Some(llvm::PrivateLinkage),
"weak" => Some(llvm::WeakAnyLinkage),
"weak_odr" => Some(llvm::WeakODRLinkage),
_ => None,
}
}
pub fn register_static(ccx: &CrateContext,
foreign_item: &ast::ForeignItem) -> ValueRef {
let ty = ty::node_id_to_type(ccx.tcx(), foreign_item.id);
let llty = type_of::type_of(ccx, ty);
let ident = link_name(foreign_item);
match attr::first_attr_value_str_by_name(foreign_item.attrs.as_slice(),
"linkage") {
// If this is a static with a linkage specified, then we need to handle
// it a little specially. The typesystem prevents things like &T and
// extern "C" fn() from being non-null, so we can't just declare a
// static and call it a day. Some linkages (like weak) will make it such
// that the static actually has a null value.
Some(name) => {
let linkage = match llvm_linkage_by_name(name.get()) {
Some(linkage) => linkage,
None => {
ccx.sess().span_fatal(foreign_item.span,
"invalid linkage specified");
}
};
let llty2 = match ty::get(ty).sty {
ty::ty_ptr(ref mt) => type_of::type_of(ccx, mt.ty),
_ => {
ccx.sess().span_fatal(foreign_item.span,
"must have type `*T` or `*mut T`");
}
};
unsafe {
let g1 = ident.get().with_c_str(|buf| {
llvm::LLVMAddGlobal(ccx.llmod, llty2.to_ref(), buf)
});
llvm::SetLinkage(g1, linkage);
let mut real_name = "_rust_extern_with_linkage_".to_string();
real_name.push_str(ident.get());
let g2 = real_name.with_c_str(|buf| {
llvm::LLVMAddGlobal(ccx.llmod, llty.to_ref(), buf)
});
llvm::SetLinkage(g2, llvm::InternalLinkage);
llvm::LLVMSetInitializer(g2, g1);
g2
}
}
None => unsafe {
ident.get().with_c_str(|buf| {
llvm::LLVMAddGlobal(ccx.llmod, llty.to_ref(), buf)
})
}
}
}
pub fn register_foreign_item_fn(ccx: &CrateContext, abi: Abi, fty: ty::t,
name: &str, span: Option<Span>) -> ValueRef {
/*!
* Registers a foreign function found in a library.
* Just adds a LLVM global.
*/
debug!("register_foreign_item_fn(abi={}, \
ty={}, \
name={})",
abi.repr(ccx.tcx()),
fty.repr(ccx.tcx()),
name);
let cc = match llvm_calling_convention(ccx, abi) {
Some(cc) => cc,
None => {
match span {
Some(s) => {
ccx.sess().span_fatal(s,
format!("ABI `{}` has no suitable calling convention \
for target architecture",
abi.user_string(ccx.tcx())).as_slice())
}
None => {
ccx.sess().fatal(
format!("ABI `{}` has no suitable calling convention \
for target architecture",
abi.user_string(ccx.tcx())).as_slice())
}
}
}
};
// Register the function as a C extern fn
let tys = foreign_types_for_fn_ty(ccx, fty);
// Make sure the calling convention is right for variadic functions
// (should've been caught if not in typeck)
if tys.fn_sig.variadic {
assert!(cc == llvm::CCallConv);
}
// Create the LLVM value for the C extern fn
let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys);
let llfn = base::get_extern_fn(ccx,
&mut *ccx.externs.borrow_mut(),
name,
cc,
llfn_ty,
fty);
add_argument_attributes(&tys, llfn);
llfn
}
pub fn trans_native_call<'a>(
bcx: &'a Block<'a>,
callee_ty: ty::t,
llfn: ValueRef,
llretptr: ValueRef,
llargs_rust: &[ValueRef],
passed_arg_tys: Vec<ty::t> )
-> &'a Block<'a> {
/*!
* Prepares a call to a native function. This requires adapting
* from the Rust argument passing rules to the native rules.
*
* # Parameters
*
* - `callee_ty`: Rust type for the function we are calling
* - `llfn`: the function pointer we are calling
* - `llretptr`: where to store the return value of the function
* - `llargs_rust`: a list of the argument values, prepared
* as they would be if calling a Rust function
* - `passed_arg_tys`: Rust type for the arguments. Normally we
* can derive these from callee_ty but in the case of variadic
* functions passed_arg_tys will include the Rust type of all
* the arguments including the ones not specified in the fn's signature.
*/
let ccx = bcx.ccx();
let tcx = bcx.tcx();
debug!("trans_native_call(callee_ty={}, \
llfn={}, \
llretptr={})",
callee_ty.repr(tcx),
ccx.tn.val_to_string(llfn),
ccx.tn.val_to_string(llretptr));
let (fn_abi, fn_sig) = match ty::get(callee_ty).sty {
ty::ty_bare_fn(ref fn_ty) => (fn_ty.abi, fn_ty.sig.clone()),
_ => ccx.sess().bug("trans_native_call called on non-function type")
};
let llsig = foreign_signature(ccx, &fn_sig, passed_arg_tys.as_slice());
let ret_def = !return_type_is_void(bcx.ccx(), fn_sig.output);
let fn_type = cabi::compute_abi_info(ccx,
llsig.llarg_tys.as_slice(),
llsig.llret_ty,
ret_def);
let arg_tys: &[cabi::ArgType] = fn_type.arg_tys.as_slice();
let mut llargs_foreign = Vec::new();
// If the foreign ABI expects return value by pointer, supply the
// pointer that Rust gave us. Sometimes we have to bitcast
// because foreign fns return slightly different (but equivalent)
// views on the same type (e.g., i64 in place of {i32,i32}).
if fn_type.ret_ty.is_indirect() {
match fn_type.ret_ty.cast {
Some(ty) => {
let llcastedretptr =
BitCast(bcx, llretptr, ty.ptr_to());
llargs_foreign.push(llcastedretptr);
}
None => {
llargs_foreign.push(llretptr);
}
}
}
for (i, &llarg_rust) in llargs_rust.iter().enumerate() {
let mut llarg_rust = llarg_rust;
if arg_tys[i].is_ignore() {
continue;
}
// Does Rust pass this argument by pointer?
let rust_indirect = type_of::arg_is_indirect(ccx,
*passed_arg_tys.get(i));
debug!("argument {}, llarg_rust={}, rust_indirect={}, arg_ty={}",
i,
ccx.tn.val_to_string(llarg_rust),
rust_indirect,
ccx.tn.type_to_string(arg_tys[i].ty));
// Ensure that we always have the Rust value indirectly,
// because it makes bitcasting easier.
if !rust_indirect {
let scratch =
base::alloca(bcx,
type_of::type_of(ccx, *passed_arg_tys.get(i)),
"__arg");
base::store_ty(bcx, llarg_rust, scratch, *passed_arg_tys.get(i));
llarg_rust = scratch;
}
debug!("llarg_rust={} (after indirection)",
ccx.tn.val_to_string(llarg_rust));
// Check whether we need to do any casting
match arg_tys[i].cast {
Some(ty) => llarg_rust = BitCast(bcx, llarg_rust, ty.ptr_to()),
None => ()
}
debug!("llarg_rust={} (after casting)",
ccx.tn.val_to_string(llarg_rust));
// Finally, load the value if needed for the foreign ABI
let foreign_indirect = arg_tys[i].is_indirect();
let llarg_foreign = if foreign_indirect {
llarg_rust
} else {
if ty::type_is_bool(*passed_arg_tys.get(i)) {
let val = LoadRangeAssert(bcx, llarg_rust, 0, 2, llvm::False);
Trunc(bcx, val, Type::i1(bcx.ccx()))
} else {
Load(bcx, llarg_rust)
}
};
debug!("argument {}, llarg_foreign={}",
i, ccx.tn.val_to_string(llarg_foreign));
// fill padding with undef value
match arg_tys[i].pad {
Some(ty) => llargs_foreign.push(C_undef(ty)),
None => ()
}
llargs_foreign.push(llarg_foreign);
}
let cc = match llvm_calling_convention(ccx, fn_abi) {
Some(cc) => cc,
None => {
// FIXME(#8357) We really ought to report a span here
ccx.sess().fatal(
format!("ABI string `{}` has no suitable ABI \
for target architecture",
fn_abi.user_string(ccx.tcx())).as_slice());
}
};
// A function pointer is called without the declaration available, so we have to apply
// any attributes with ABI implications directly to the call instruction.
let mut attrs = Vec::new();
// Add attributes that are always applicable, independent of the concrete foreign ABI
if fn_type.ret_ty.is_indirect() {
// The outptr can be noalias and nocapture because it's entirely
// invisible to the program. We can also mark it as nonnull
attrs.push((1, llvm::NoAliasAttribute as u64));
attrs.push((1, llvm::NoCaptureAttribute as u64));
attrs.push((1, llvm::NonNullAttribute as u64));
};
// Add attributes that depend on the concrete foreign ABI
let mut arg_idx = if fn_type.ret_ty.is_indirect() { 1 } else { 0 };
match fn_type.ret_ty.attr {
Some(attr) => attrs.push((arg_idx, attr as u64)),
_ => ()
}
arg_idx += 1;
for arg_ty in fn_type.arg_tys.iter() {
if arg_ty.is_ignore() {
continue;
}
// skip padding
if arg_ty.pad.is_some() { arg_idx += 1; }
match arg_ty.attr {
Some(attr) => attrs.push((arg_idx, attr as u64)),
_ => {}
}
arg_idx += 1;
}
let llforeign_retval = CallWithConv(bcx,
llfn,
llargs_foreign.as_slice(),
cc,
attrs.as_slice());
// If the function we just called does not use an outpointer,
// store the result into the rust outpointer. Cast the outpointer
// type to match because some ABIs will use a different type than
// the Rust type. e.g., a {u32,u32} struct could be returned as
// u64.
if ret_def && !fn_type.ret_ty.is_indirect() {
let llrust_ret_ty = llsig.llret_ty;
let llforeign_ret_ty = match fn_type.ret_ty.cast {
Some(ty) => ty,
None => fn_type.ret_ty.ty
};
debug!("llretptr={}", ccx.tn.val_to_string(llretptr));
debug!("llforeign_retval={}", ccx.tn.val_to_string(llforeign_retval));
debug!("llrust_ret_ty={}", ccx.tn.type_to_string(llrust_ret_ty));
debug!("llforeign_ret_ty={}", ccx.tn.type_to_string(llforeign_ret_ty));
if llrust_ret_ty == llforeign_ret_ty {
base::store_ty(bcx, llforeign_retval, llretptr, fn_sig.output)
} else {
// The actual return type is a struct, but the ABI
// adaptation code has cast it into some scalar type. The
// code that follows is the only reliable way I have
// found to do a transform like i64 -> {i32,i32}.
// Basically we dump the data onto the stack then memcpy it.
//
// Other approaches I tried:
// - Casting rust ret pointer to the foreign type and using Store
// is (a) unsafe if size of foreign type > size of rust type and
// (b) runs afoul of strict aliasing rules, yielding invalid
// assembly under -O (specifically, the store gets removed).
// - Truncating foreign type to correct integral type and then
// bitcasting to the struct type yields invalid cast errors.
let llscratch = base::alloca(bcx, llforeign_ret_ty, "__cast");
Store(bcx, llforeign_retval, llscratch);
let llscratch_i8 = BitCast(bcx, llscratch, Type::i8(ccx).ptr_to());
let llretptr_i8 = BitCast(bcx, llretptr, Type::i8(ccx).ptr_to());
let llrust_size = machine::llsize_of_store(ccx, llrust_ret_ty);
let llforeign_align = machine::llalign_of_min(ccx, llforeign_ret_ty);
let llrust_align = machine::llalign_of_min(ccx, llrust_ret_ty);
let llalign = cmp::min(llforeign_align, llrust_align);
debug!("llrust_size={:?}", llrust_size);
base::call_memcpy(bcx, llretptr_i8, llscratch_i8,
C_uint(ccx, llrust_size as uint), llalign as u32);
}
}
return bcx;
}
pub fn trans_foreign_mod(ccx: &CrateContext, foreign_mod: &ast::ForeignMod) {
let _icx = push_ctxt("foreign::trans_foreign_mod");
for foreign_item in foreign_mod.items.iter() {
let lname = link_name(&**foreign_item);
match foreign_item.node {
ast::ForeignItemFn(..) => {
match foreign_mod.abi {
Rust | RustIntrinsic => {}
abi => {
let ty = ty::node_id_to_type(ccx.tcx(), foreign_item.id);
register_foreign_item_fn(ccx, abi, ty,
lname.get().as_slice(),
Some(foreign_item.span));
}
}
}
_ => {}
}
ccx.item_symbols.borrow_mut().insert(foreign_item.id,
lname.get().to_string());
}
}
///////////////////////////////////////////////////////////////////////////
// Rust functions with foreign ABIs
//
// These are normal Rust functions defined with foreign ABIs. For
// now, and perhaps forever, we translate these using a "layer of
// indirection". That is, given a Rust declaration like:
//
// extern "C" fn foo(i: u32) -> u32 { ... }
//
// we will generate a function like:
//
// S foo(T i) {
// S r;
// foo0(&r, NULL, i);
// return r;
// }
//
// #[inline_always]
// void foo0(uint32_t *r, void *env, uint32_t i) { ... }
//
// Here the (internal) `foo0` function follows the Rust ABI as normal,
// where the `foo` function follows the C ABI. We rely on LLVM to
// inline the one into the other. Of course we could just generate the
// correct code in the first place, but this is much simpler.
pub fn register_rust_fn_with_foreign_abi(ccx: &CrateContext,
sp: Span,
sym: String,
node_id: ast::NodeId)
-> ValueRef {
let _icx = push_ctxt("foreign::register_foreign_fn");
let tys = foreign_types_for_id(ccx, node_id);
let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys);
let t = ty::node_id_to_type(ccx.tcx(), node_id);
let cconv = match ty::get(t).sty {
ty::ty_bare_fn(ref fn_ty) => {
let c = llvm_calling_convention(ccx, fn_ty.abi);
c.unwrap_or(llvm::CCallConv)
}
_ => fail!("expected bare fn in register_rust_fn_with_foreign_abi")
};
let llfn = base::register_fn_llvmty(ccx, sp, sym, node_id, cconv, llfn_ty);
add_argument_attributes(&tys, llfn);
debug!("register_rust_fn_with_foreign_abi(node_id={:?}, llfn_ty={}, llfn={})",
node_id, ccx.tn.type_to_string(llfn_ty), ccx.tn.val_to_string(llfn));
llfn
}
pub fn trans_rust_fn_with_foreign_abi(ccx: &CrateContext,
decl: &ast::FnDecl,
body: &ast::Block,
attrs: &[ast::Attribute],
llwrapfn: ValueRef,
id: ast::NodeId) {
let _icx = push_ctxt("foreign::build_foreign_fn");
let tys = foreign_types_for_id(ccx, id);
unsafe { // unsafe because we call LLVM operations
// Build up the Rust function (`foo0` above).
let llrustfn = build_rust_fn(ccx, decl, body, attrs, id);
// Build up the foreign wrapper (`foo` above).
return build_wrap_fn(ccx, llrustfn, llwrapfn, &tys, id);
}
fn build_rust_fn(ccx: &CrateContext,
decl: &ast::FnDecl,
body: &ast::Block,
attrs: &[ast::Attribute],
id: ast::NodeId)
-> ValueRef {
let _icx = push_ctxt("foreign::foreign::build_rust_fn");
let tcx = ccx.tcx();
let t = ty::node_id_to_type(tcx, id);
let ps = ccx.tcx.map.with_path(id, |path| {
let abi = Some(ast_map::PathName(special_idents::clownshoe_abi.name));
link::mangle(path.chain(abi.move_iter()), None)
});
// Compute the type that the function would have if it were just a
// normal Rust function. This will be the type of the wrappee fn.
match ty::get(t).sty {
ty::ty_bare_fn(ref f) => {
assert!(f.abi != Rust && f.abi != RustIntrinsic);
}
_ => {
ccx.sess().bug(format!("build_rust_fn: extern fn {} has ty {}, \
expected a bare fn ty",
ccx.tcx.map.path_to_string(id),
t.repr(tcx)).as_slice());
}
};
debug!("build_rust_fn: path={} id={} t={}",
ccx.tcx.map.path_to_string(id),
id, t.repr(tcx));
let llfn = base::decl_internal_rust_fn(ccx, t, ps.as_slice());
base::set_llvm_fn_attrs(attrs, llfn);
base::trans_fn(ccx, decl, body, llfn, ¶m_substs::empty(), id, []);
llfn
}
unsafe fn build_wrap_fn(ccx: &CrateContext,
llrustfn: ValueRef,
llwrapfn: ValueRef,
tys: &ForeignTypes,
id: ast::NodeId) {
let _icx = push_ctxt(
"foreign::trans_rust_fn_with_foreign_abi::build_wrap_fn");
let tcx = ccx.tcx();
let t = ty::node_id_to_type(tcx, id);
debug!("build_wrap_fn(llrustfn={}, llwrapfn={}, t={})",
ccx.tn.val_to_string(llrustfn),
ccx.tn.val_to_string(llwrapfn),
t.repr(ccx.tcx()));
// Avoid all the Rust generation stuff and just generate raw
// LLVM here.
//
// We want to generate code like this:
//
// S foo(T i) {
// S r;
// foo0(&r, NULL, i);
// return r;
// }
let the_block =
"the block".with_c_str(
|s| llvm::LLVMAppendBasicBlockInContext(ccx.llcx, llwrapfn, s));
let builder = ccx.builder();
builder.position_at_end(the_block);
// Array for the arguments we will pass to the rust function.
let mut llrust_args = Vec::new();
let mut next_foreign_arg_counter: c_uint = 0;
let next_foreign_arg: |pad: bool| -> c_uint = |pad: bool| {
next_foreign_arg_counter += if pad {
2
} else {
1
};
next_foreign_arg_counter - 1
};
// If there is an out pointer on the foreign function
let foreign_outptr = {
if tys.fn_ty.ret_ty.is_indirect() {
Some(get_param(llwrapfn, next_foreign_arg(false)))
} else {
None
}
};
// Push Rust return pointer, using null if it will be unused.
let rust_uses_outptr =
type_of::return_uses_outptr(ccx, tys.fn_sig.output);
let return_alloca: Option<ValueRef>;
let llrust_ret_ty = tys.llsig.llret_ty;
let llrust_retptr_ty = llrust_ret_ty.ptr_to();
if rust_uses_outptr {
// Rust expects to use an outpointer. If the foreign fn
// also uses an outpointer, we can reuse it, but the types
// may vary, so cast first to the Rust type. If the
// foreign fn does NOT use an outpointer, we will have to
// alloca some scratch space on the stack.
match foreign_outptr {
Some(llforeign_outptr) => {
debug!("out pointer, foreign={}",
ccx.tn.val_to_string(llforeign_outptr));
let llrust_retptr =
builder.bitcast(llforeign_outptr, llrust_ret_ty.ptr_to());
debug!("out pointer, foreign={} (casted)",
ccx.tn.val_to_string(llrust_retptr));
llrust_args.push(llrust_retptr);
return_alloca = None;
}
None => {
let slot = builder.alloca(llrust_ret_ty, "return_alloca");
debug!("out pointer, \
allocad={}, \
llrust_ret_ty={}, \
return_ty={}",
ccx.tn.val_to_string(slot),
ccx.tn.type_to_string(llrust_ret_ty),
tys.fn_sig.output.repr(tcx));
llrust_args.push(slot);
return_alloca = Some(slot);
}
}
} else {
// Rust does not expect an outpointer. If the foreign fn
// does use an outpointer, then we will do a store of the
// value that the Rust fn returns.
return_alloca = None;
};
// Build up the arguments to the call to the rust function.
// Careful to adapt for cases where the native convention uses
// a pointer and Rust does not or vice versa.
for i in range(0, tys.fn_sig.inputs.len()) {
let rust_ty = *tys.fn_sig.inputs.get(i);
let llrust_ty = *tys.llsig.llarg_tys.get(i);
let rust_indirect = type_of::arg_is_indirect(ccx, rust_ty);
let llforeign_arg_ty = *tys.fn_ty.arg_tys.get(i);
let foreign_indirect = llforeign_arg_ty.is_indirect();
// skip padding
let foreign_index = next_foreign_arg(llforeign_arg_ty.pad.is_some());
let mut llforeign_arg = get_param(llwrapfn, foreign_index);
debug!("llforeign_arg {}{}: {}", "#",
i, ccx.tn.val_to_string(llforeign_arg));
debug!("rust_indirect = {}, foreign_indirect = {}",
rust_indirect, foreign_indirect);
// Ensure that the foreign argument is indirect (by
// pointer). It makes adapting types easier, since we can
// always just bitcast pointers.
if !foreign_indirect {
llforeign_arg = if ty::type_is_bool(rust_ty) {
let lltemp = builder.alloca(Type::bool(ccx), "");
builder.store(builder.zext(llforeign_arg, Type::bool(ccx)), lltemp);
lltemp
} else {
let lltemp = builder.alloca(val_ty(llforeign_arg), "");
builder.store(llforeign_arg, lltemp);
lltemp
}
}
// If the types in the ABI and the Rust types don't match,
// bitcast the llforeign_arg pointer so it matches the types
// Rust expects.
if llforeign_arg_ty.cast.is_some() {
assert!(!foreign_indirect);
llforeign_arg = builder.bitcast(llforeign_arg, llrust_ty.ptr_to());
}
let llrust_arg = if rust_indirect {
llforeign_arg
} else {
if ty::type_is_bool(rust_ty) {
let tmp = builder.load_range_assert(llforeign_arg, 0, 2, llvm::False);
builder.trunc(tmp, Type::i1(ccx))
} else {
builder.load(llforeign_arg)
}
};
debug!("llrust_arg {}{}: {}", "#",
i, ccx.tn.val_to_string(llrust_arg));
llrust_args.push(llrust_arg);
}
// Perform the call itself
debug!("calling llrustfn = {}, t = {}", ccx.tn.val_to_string(llrustfn), t.repr(ccx.tcx()));
let attributes = base::get_fn_llvm_attributes(ccx, t);
let llrust_ret_val = builder.call(llrustfn, llrust_args.as_slice(), attributes.as_slice());
// Get the return value where the foreign fn expects it.
let llforeign_ret_ty = match tys.fn_ty.ret_ty.cast {
Some(ty) => ty,
None => tys.fn_ty.ret_ty.ty
};
match foreign_outptr {
None if !tys.ret_def => {
// Function returns `()` or `bot`, which in Rust is the LLVM
// type "{}" but in foreign ABIs is "Void".
builder.ret_void();
}
None if rust_uses_outptr => {
// Rust uses an outpointer, but the foreign ABI does not. Load.
let llrust_outptr = return_alloca.unwrap();
let llforeign_outptr_casted =
builder.bitcast(llrust_outptr, llforeign_ret_ty.ptr_to());
let llforeign_retval = builder.load(llforeign_outptr_casted);
builder.ret(llforeign_retval);
}
None if llforeign_ret_ty != llrust_ret_ty => {
// Neither ABI uses an outpointer, but the types don't
// quite match. Must cast. Probably we should try and
// examine the types and use a concrete llvm cast, but
// right now we just use a temp memory location and
// bitcast the pointer, which is the same thing the
// old wrappers used to do.
let lltemp = builder.alloca(llforeign_ret_ty, "");
let lltemp_casted = builder.bitcast(lltemp, llrust_ret_ty.ptr_to());
builder.store(llrust_ret_val, lltemp_casted);
let llforeign_retval = builder.load(lltemp);
builder.ret(llforeign_retval);
}
None => {
// Neither ABI uses an outpointer, and the types
// match. Easy peasy.
builder.ret(llrust_ret_val);
}
Some(llforeign_outptr) if !rust_uses_outptr => {
// Foreign ABI requires an out pointer, but Rust doesn't.
// Store Rust return value.
let llforeign_outptr_casted =
builder.bitcast(llforeign_outptr, llrust_retptr_ty);
builder.store(llrust_ret_val, llforeign_outptr_casted);
builder.ret_void();
}
Some(_) => {
// Both ABIs use outpointers. Easy peasy.
builder.ret_void();
}
}
}
}
///////////////////////////////////////////////////////////////////////////
// General ABI Support
//
// This code is kind of a confused mess and needs to be reworked given
// the massive simplifications that have occurred.
pub fn link_name(i: &ast::ForeignItem) -> InternedString {
match attr::first_attr_value_str_by_name(i.attrs.as_slice(), "link_name") {
Some(ln) => ln.clone(),
None => match weak_lang_items::link_name(i.attrs.as_slice()) {
Some(name) => name,
None => token::get_ident(i.ident),
}
}
}
fn foreign_signature(ccx: &CrateContext, fn_sig: &ty::FnSig, arg_tys: &[ty::t])
-> LlvmSignature {
    /*!
     * The foreign signature gives the LLVM types of the arguments and the
     * return type of a function. Note that these LLVM types are not quite
     * the same as the LLVM types of a native Rust function, because foreign
     * functions just plain ignore modes. They also don't pass aggregate
     * values by pointer like we do.
     */
let llarg_tys = arg_tys.iter().map(|&arg| arg_type_of(ccx, arg)).collect();
let llret_ty = type_of::arg_type_of(ccx, fn_sig.output);
LlvmSignature {
llarg_tys: llarg_tys,
llret_ty: llret_ty
}
}
fn foreign_types_for_id(ccx: &CrateContext,
id: ast::NodeId) -> ForeignTypes {
foreign_types_for_fn_ty(ccx, ty::node_id_to_type(ccx.tcx(), id))
}
fn foreign_types_for_fn_ty(ccx: &CrateContext,
ty: ty::t) -> ForeignTypes {
let fn_sig = match ty::get(ty).sty {
ty::ty_bare_fn(ref fn_ty) => fn_ty.sig.clone(),
_ => ccx.sess().bug("foreign_types_for_fn_ty called on non-function type")
};
let llsig = foreign_signature(ccx, &fn_sig, fn_sig.inputs.as_slice());
let ret_def = !return_type_is_void(ccx, fn_sig.output);
let fn_ty = cabi::compute_abi_info(ccx,
llsig.llarg_tys.as_slice(),
llsig.llret_ty,
ret_def);
debug!("foreign_types_for_fn_ty(\
ty={}, \
llsig={} -> {}, \
fn_ty={} -> {}, \
ret_def={}",
ty.repr(ccx.tcx()),
ccx.tn.types_to_str(llsig.llarg_tys.as_slice()),
ccx.tn.type_to_string(llsig.llret_ty),
ccx.tn.types_to_str(fn_ty.arg_tys.iter().map(|t| t.ty).collect::<Vec<_>>().as_slice()),
ccx.tn.type_to_string(fn_ty.ret_ty.ty),
ret_def);
ForeignTypes {
fn_sig: fn_sig,
llsig: llsig,
ret_def: ret_def,
fn_ty: fn_ty
}
}
fn lltype_for_fn_from_foreign_types(ccx: &CrateContext, tys: &ForeignTypes) -> Type {
let mut llargument_tys = Vec::new();
let ret_ty = tys.fn_ty.ret_ty;
let llreturn_ty = if ret_ty.is_indirect() {
llargument_tys.push(ret_ty.ty.ptr_to());
Type::void(ccx)
} else {
match ret_ty.cast {
Some(ty) => ty,
None => ret_ty.ty
}
};
for &arg_ty in tys.fn_ty.arg_tys.iter() {
if arg_ty.is_ignore() {
continue;
}
// add padding
match arg_ty.pad {
Some(ty) => llargument_tys.push(ty),
None => ()
}
let llarg_ty = if arg_ty.is_indirect() {
arg_ty.ty.ptr_to()
} else {
match arg_ty.cast {
Some(ty) => ty,
None => arg_ty.ty
}
};
llargument_tys.push(llarg_ty);
}
if tys.fn_sig.variadic {
Type::variadic_func(llargument_tys.as_slice(), &llreturn_ty)
} else {
Type::func(llargument_tys.as_slice(), &llreturn_ty)
}
}
pub fn lltype_for_foreign_fn(ccx: &CrateContext, ty: ty::t) -> Type {
lltype_for_fn_from_foreign_types(ccx, &foreign_types_for_fn_ty(ccx, ty))
}
fn add_argument_attributes(tys: &ForeignTypes,
llfn: ValueRef) {
let mut i = if tys.fn_ty.ret_ty.is_indirect() {
1i
} else {
0i
};
match tys.fn_ty.ret_ty.attr {
Some(attr) => unsafe {
llvm::LLVMAddFunctionAttribute(llfn, i as c_uint, attr as u64);
},
None => {}
}
i += 1;
for &arg_ty in tys.fn_ty.arg_tys.iter() {
if arg_ty.is_ignore() {
continue;
}
// skip padding
if arg_ty.pad.is_some() { i += 1; }
match arg_ty.attr {
Some(attr) => unsafe {
llvm::LLVMAddFunctionAttribute(llfn, i as c_uint, attr as u64);
},
None => ()
}
i += 1;
}
}
| 38.178096 | 99 | 0.536488 |
16985769edba055bc39ee17c1ee5a2cc76f9fbc8 | 5,198 | use std::fmt;
use traits;
/// A date as represented in FAT32 on-disk structures.
#[repr(C, packed)]
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
pub struct Date(u16);
impl Date {
pub fn year(&self) -> usize {
(self.0 >> 9) as usize + 1980
}
pub fn month(&self) -> u8 {
((self.0 >> 5) as u8) & 0b1111
}
pub fn day(&self) -> u8 {
(self.0 & 0b11111) as u8
}
}
/// Time as represented in FAT32 on-disk structures.
#[repr(C, packed)]
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
pub struct Time(u16);
impl Time {
pub fn hour(&self) -> u8 {
(self.0 >> 11) as u8
}
pub fn minute(&self) -> u8 {
((self.0 >> 5) as u8) & 0b111111
}
pub fn second(&self) -> u8 {
((self.0 as u8) & 0b11111) * 2
}
}
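// A minimal decoding sketch, assuming the standard FAT bit layout (a date
// packs year/month/day as 7/4/5 bits; a time packs hour/minute/half-seconds
// as 5/6/5 bits). The raw values below are made up for illustration.
#[cfg(test)]
mod packed_decoding_sketch {
    use super::{Date, Time};

    #[test]
    fn decodes_packed_date_and_time() {
        // (37 << 9) | (3 << 5) | 1 encodes 2017-03-01 (years offset from 1980).
        let date = Date(0x4A61);
        assert_eq!(date.year(), 2017);
        assert_eq!(date.month(), 3);
        assert_eq!(date.day(), 1);
        // (13 << 11) | (45 << 5) | 15 encodes 13:45:30 (seconds stored halved).
        let time = Time(0x6DAF);
        assert_eq!(time.hour(), 13);
        assert_eq!(time.minute(), 45);
        assert_eq!(time.second(), 30);
    }
}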
/// File attributes as represented in FAT32 on-disk structures.
#[repr(C, packed)]
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
pub struct Attributes(u8);
/// A structure containing a date and time.
#[derive(Default, Copy, Clone, Debug, PartialEq, Eq)]
pub struct Timestamp {
pub time: Time,
pub date: Date
}
/// Metadata for a directory entry.
#[derive(Default, Debug, Clone)]
pub struct Metadata {
attributes: Attributes,
created: Timestamp,
accessed: Timestamp,
modified: Timestamp,
}
impl Attributes {
const READ_ONLY: u8 = 0x01;
const HIDDEN: u8 = 0x02;
const SYSTEM: u8 = 0x04;
const VOLUME_ID: u8 = 0x08;
const DIRECTORY: u8 = 0x10;
const ARCHIVE: u8 = 0x20;
pub fn read_only(&self) -> bool {
(self.0 & Attributes::READ_ONLY) != 0
}
pub fn hidden(&self) -> bool {
(self.0 & Attributes::HIDDEN) != 0
}
pub fn system(&self) -> bool {
(self.0 & Attributes::SYSTEM) != 0
}
pub fn volume_id(&self) -> bool {
(self.0 & Attributes::VOLUME_ID) != 0
}
pub fn directory(&self) -> bool {
(self.0 & Attributes::DIRECTORY) != 0
}
pub fn archive(&self) -> bool {
(self.0 & Attributes::ARCHIVE) != 0
}
}
impl fmt::Display for Attributes {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let mut had_attribute = false;
let attributes = [self.read_only(), self.hidden(), self.system(),
self.volume_id(), self.directory(), self.archive()];
let attribute_names = ["READ_ONLY", "HIDDEN", "SYSTEM", "VOLUME_ID",
"DIRECTORY", "ARCHIVE"];
assert_eq!(attributes.len(), attribute_names.len());
for i in 0..attributes.len() {
if attributes[i] {
if had_attribute {
write!(f, "{}", "|")?;
}
write!(f, "{}", attribute_names[i].to_string())?;
had_attribute = true;
}
}
Ok(())
}
}
impl traits::Timestamp for Timestamp {
/// The calendar year.
///
/// The year is not offset. 2009 is 2009.
fn year(&self) -> usize {
self.date.year()
}
/// The calendar month, starting at 1 for January. Always in range [1, 12].
///
    /// January is 1, February is 2, ..., December is 12.
fn month(&self) -> u8 {
self.date.month()
}
/// The calendar day, starting at 1. Always in range [1, 31].
fn day(&self) -> u8 {
self.date.day()
}
/// The 24-hour hour. Always in range [0, 24).
fn hour(&self) -> u8 {
self.time.hour()
}
/// The minute. Always in range [0, 60).
fn minute(&self) -> u8 {
self.time.minute()
}
/// The second. Always in range [0, 60).
fn second(&self) -> u8 {
self.time.second()
}
}
impl fmt::Display for Timestamp {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
use traits::Timestamp;
write!(f, "{}-{}-{} {}:{:02}:{:02}", self.year(), self.month(),
self.day(), self.hour(), self.minute(), self.second())
}
}
impl Metadata {
pub fn new(attributes: Attributes, created: Timestamp, accessed: Timestamp,
modified: Timestamp) -> Metadata {
Metadata { attributes, created, accessed, modified }
}
}
impl traits::Metadata for Metadata {
type Timestamp = Timestamp;
/// Whether the associated entry is read only.
fn read_only(&self) -> bool {
self.attributes.read_only()
}
/// Whether the entry should be "hidden" from directory traversals.
fn hidden(&self) -> bool {
self.attributes.hidden()
}
/// The timestamp when the entry was created.
fn created(&self) -> Self::Timestamp {
self.created
}
/// The timestamp for the entry's last access.
fn accessed(&self) -> Self::Timestamp {
self.accessed
}
/// The timestamp for the entry's last modification.
fn modified(&self) -> Self::Timestamp {
self.modified
}
}
impl fmt::Display for Metadata {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "attributes={} created={} accessed={} modified={}",
self.attributes, self.created, self.accessed, self.modified)
}
}
| 25.111111 | 79 | 0.555214 |
71beaabb93a7f194a8c9066b43be761fdfa70a6f | 439 | // (**) Drop every N'th element from a list.
fn drop_every<T> (v : &mut Vec<T>, n : i32) {
let mut i = 0;
let mut count = 0;
while i < v.len() {
count += 1;
if count % n == 0 {
v.remove(i);
} else {
i += 1;
}
}
}
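// An equivalent sketch (a hypothetical alternative, not used by `main` below)
// that filters by 1-based position instead of removing in place; this avoids
// the O(n^2) worst case of repeated `Vec::remove` calls and sidesteps the
// i32/usize juggling above.
#[allow(dead_code)]
fn drop_every_filtered<T>(v: Vec<T>, n: usize) -> Vec<T> {
    v.into_iter()
        .enumerate()
        .filter(|(i, _)| (i + 1) % n != 0)
        .map(|(_, x)| x)
        .collect()
}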
fn main() {
let s = "abcdefghik";
let mut v = s.as_bytes().to_vec();
drop_every(&mut v, 3);
println!("{:?}", std::str::from_utf8(&v));
} | 20.904762 | 46 | 0.437358 |
f88ea71d346ab142f534f104e78ef7e5d5601274 | 4,546 | //! CurrencyId implementation
use codec::{CompactAs, Decode, Encode, MaxEncodedLen};
use composable_traits::currency::Exponent;
use core::ops::Div;
use scale_info::TypeInfo;
use sp_runtime::RuntimeDebug;
use composable_support::rpc_helpers::FromHexStr;
#[cfg(feature = "std")]
use serde::{Deserialize, Serialize};
use sp_runtime::sp_std::ops::Deref;
/// Trait used to write generalized code over well-known currencies.
/// We use consts so that these can be matched on directly.
/// Allows code reuse across runtimes and, in the future, cross-relay transfers.
// TODO: split CurrencyId for runtimes - one for DOT and one for KSM
pub trait WellKnownCurrency {
	// Consts work well with patterns; generic associated consts hit the
	// `associated consts cannot be referenced in patterns` error.
const NATIVE: Self;
	/// Usually we expect to run with a relay chain;
	/// if not, the degenerate case is for this to equal `NATIVE`.
const RELAY_NATIVE: Self;
}
#[derive(
Encode,
Decode,
MaxEncodedLen,
Eq,
PartialEq,
Copy,
Clone,
RuntimeDebug,
PartialOrd,
Ord,
TypeInfo,
CompactAs,
)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[repr(transparent)]
pub struct CurrencyId(pub u128);
impl WellKnownCurrency for CurrencyId {
const NATIVE: CurrencyId = CurrencyId::PICA;
const RELAY_NATIVE: CurrencyId = CurrencyId::KSM;
}
impl CurrencyId {
pub const INVALID: CurrencyId = CurrencyId(0);
pub const PICA: CurrencyId = CurrencyId(1);
pub const LAYR: CurrencyId = CurrencyId(2);
pub const CROWD_LOAN: CurrencyId = CurrencyId(3);
/// Kusama native token
pub const KSM: CurrencyId = CurrencyId(4);
	/// Karura stablecoin (Karura Dollar), not native.
#[allow(non_upper_case_globals)]
pub const kUSD: CurrencyId = CurrencyId(129);
#[inline(always)]
pub const fn decimals() -> Exponent {
12
}
pub fn unit<T: From<u64>>() -> T {
T::from(10_u64.pow(Self::decimals()))
}
pub fn milli<T: From<u64> + Div<Output = T>>() -> T {
Self::unit::<T>() / T::from(1000_u64)
}
}
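// A small illustration (hypothetical helper, not used by the runtime) of why
// plain consts matter here: because `CurrencyId` is structurally matchable,
// the inherent consts above can appear directly in `match` patterns.
#[allow(dead_code)]
fn is_well_known_sketch(id: CurrencyId) -> bool {
	matches!(id, CurrencyId::PICA | CurrencyId::LAYR | CurrencyId::KSM)
}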
impl FromHexStr for CurrencyId {
type Err = <u128 as FromHexStr>::Err;
fn from_hex_str(src: &str) -> core::result::Result<Self, Self::Err> {
u128::from_hex_str(src).map(CurrencyId)
}
}
impl core::fmt::LowerHex for CurrencyId {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
core::fmt::LowerHex::fmt(&self.0, f)
}
}
impl Default for CurrencyId {
#[inline]
fn default() -> Self {
CurrencyId::INVALID
}
}
impl Deref for CurrencyId {
type Target = u128;
#[inline]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<CurrencyId> for u128 {
#[inline]
fn from(id: CurrencyId) -> Self {
id.0
}
}
impl From<u128> for CurrencyId {
#[inline]
fn from(raw: u128) -> Self {
CurrencyId(raw)
}
}
/// Maps an id to a generic-key junction;
/// unfortunately, this is currently the best way to encode a currency id in XCM.
#[cfg(feature = "develop")]
impl From<CurrencyId> for xcm::latest::Junction {
fn from(this: CurrencyId) -> Self {
xcm::latest::Junction::GeneralKey(this.encode())
}
}
mod ops {
use super::CurrencyId;
use core::ops::{Add, Mul};
use sp_runtime::traits::{Bounded, CheckedAdd, CheckedMul, One, Saturating, Zero};
impl Add for CurrencyId {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
CurrencyId(self.0.add(rhs.0))
}
}
impl Mul for CurrencyId {
type Output = CurrencyId;
fn mul(self, rhs: Self) -> Self::Output {
CurrencyId(self.0.mul(rhs.0))
}
}
impl CheckedAdd for CurrencyId {
fn checked_add(&self, v: &Self) -> Option<Self> {
Some(CurrencyId(self.0.checked_add(v.0)?))
}
}
impl CheckedMul for CurrencyId {
fn checked_mul(&self, v: &Self) -> Option<Self> {
Some(CurrencyId(self.0.checked_mul(v.0)?))
}
}
impl Zero for CurrencyId {
fn zero() -> Self {
CurrencyId(0)
}
fn is_zero(&self) -> bool {
self.0.is_zero()
}
}
impl One for CurrencyId {
fn one() -> Self {
CurrencyId(u128::one())
}
}
impl Bounded for CurrencyId {
fn min_value() -> Self {
CurrencyId(u128::min_value())
}
fn max_value() -> Self {
CurrencyId(u128::max_value())
}
}
impl Saturating for CurrencyId {
fn saturating_add(self, rhs: Self) -> Self {
self.0.saturating_add(rhs.0).into()
}
fn saturating_sub(self, rhs: Self) -> Self {
<u128 as Saturating>::saturating_sub(self.0, rhs.0).into()
}
fn saturating_mul(self, rhs: Self) -> Self {
<u128 as Saturating>::saturating_mul(self.0, rhs.0).into()
}
fn saturating_pow(self, exp: usize) -> Self {
<u128 as Saturating>::saturating_pow(self.0, exp).into()
}
}
}
| 22.284314 | 88 | 0.675759 |
6212c4ffae30b1b22c6035b470b2887c446f6ce8 | 22,076 | use indexmap::map::{Entry, IndexMap};
use polars::chunked_array::object::builder::ObjectChunkedBuilder;
use polars::chunked_array::ChunkedArray;
use bigdecimal::{FromPrimitive, ToPrimitive};
use chrono::{DateTime, FixedOffset, NaiveDateTime};
use nu_errors::ShellError;
use nu_source::{Span, Tag};
use num_bigint::BigInt;
use polars::prelude::{
DataFrame, DataType, DatetimeChunked, Int64Type, IntoSeries, NamedFrom, NewChunkedArray,
ObjectType, PolarsNumericType, Series,
};
use std::ops::{Deref, DerefMut};
use super::NuDataFrame;
use crate::{Dictionary, Primitive, UntaggedValue, Value};
const SECS_PER_DAY: i64 = 86_400;
#[derive(Debug)]
pub struct Column {
name: String,
values: Vec<Value>,
}
impl Column {
pub fn new(name: String, values: Vec<Value>) -> Self {
Self { name, values }
}
pub fn new_empty(name: String) -> Self {
Self {
name,
values: Vec::new(),
}
}
pub fn name(&self) -> &str {
self.name.as_str()
}
pub fn iter(&self) -> impl Iterator<Item = &Value> {
self.values.iter()
}
}
impl IntoIterator for Column {
type Item = Value;
type IntoIter = std::vec::IntoIter<Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.values.into_iter()
}
}
impl Deref for Column {
type Target = Vec<Value>;
fn deref(&self) -> &Self::Target {
&self.values
}
}
impl DerefMut for Column {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.values
}
}
#[derive(Debug)]
pub enum InputType {
Integer,
Decimal,
String,
Boolean,
Object,
Date,
Duration,
}
#[derive(Debug)]
pub struct TypedColumn {
column: Column,
column_type: Option<InputType>,
}
impl TypedColumn {
fn new_empty(name: String) -> Self {
Self {
column: Column::new_empty(name),
column_type: None,
}
}
}
impl Deref for TypedColumn {
type Target = Column;
fn deref(&self) -> &Self::Target {
&self.column
}
}
impl DerefMut for TypedColumn {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.column
}
}
pub type ColumnMap = IndexMap<String, TypedColumn>;
pub fn create_column(
series: &Series,
from_row: usize,
to_row: usize,
) -> Result<Column, ShellError> {
let size = to_row - from_row;
match series.dtype() {
DataType::Null => {
let values = std::iter::repeat(Value {
value: UntaggedValue::Primitive(Primitive::Nothing),
tag: Tag::default(),
})
.take(size)
.collect::<Vec<Value>>();
Ok(Column::new(series.name().into(), values))
}
DataType::UInt8 => {
let casted = series.u8().map_err(|e| {
ShellError::labeled_error(
"Casting error",
format!("casting error: {}", e),
Span::default(),
)
})?;
Ok(column_from_casted(casted, from_row, size))
}
DataType::UInt16 => {
let casted = series.u16().map_err(|e| {
ShellError::labeled_error(
"Casting error",
format!("casting error: {}", e),
Span::default(),
)
})?;
Ok(column_from_casted(casted, from_row, size))
}
DataType::UInt32 => {
let casted = series.u32().map_err(|e| {
ShellError::labeled_error(
"Casting error",
format!("casting error: {}", e),
Span::default(),
)
})?;
Ok(column_from_casted(casted, from_row, size))
}
DataType::UInt64 => {
let casted = series.u64().map_err(|e| {
ShellError::labeled_error(
"Casting error",
format!("casting error: {}", e),
Span::default(),
)
})?;
Ok(column_from_casted(casted, from_row, size))
}
DataType::Int8 => {
let casted = series.i8().map_err(|e| {
ShellError::labeled_error(
"Casting error",
format!("casting error: {}", e),
Span::default(),
)
})?;
Ok(column_from_casted(casted, from_row, size))
}
DataType::Int16 => {
let casted = series.i16().map_err(|e| {
ShellError::labeled_error(
"Casting error",
format!("casting error: {}", e),
Span::default(),
)
})?;
Ok(column_from_casted(casted, from_row, size))
}
DataType::Int32 => {
let casted = series.i32().map_err(|e| {
ShellError::labeled_error(
"Casting error",
format!("casting error: {}", e),
Span::default(),
)
})?;
Ok(column_from_casted(casted, from_row, size))
}
DataType::Int64 => {
let casted = series.i64().map_err(|e| {
ShellError::labeled_error(
"Casting error",
format!("casting error: {}", e),
Span::default(),
)
})?;
Ok(column_from_casted(casted, from_row, size))
}
DataType::Float32 => {
let casted = series.f32().map_err(|e| {
ShellError::labeled_error(
"Casting error",
format!("casting error: {}", e),
Span::default(),
)
})?;
Ok(column_from_casted(casted, from_row, size))
}
DataType::Float64 => {
let casted = series.f64().map_err(|e| {
ShellError::labeled_error(
"Casting error",
format!("casting error: {}", e),
Span::default(),
)
})?;
Ok(column_from_casted(casted, from_row, size))
}
DataType::Boolean => {
let casted = series.bool().map_err(|e| {
ShellError::labeled_error(
"Casting error",
format!("casting error: {}", e),
Span::default(),
)
})?;
let values = casted
.into_iter()
.skip(from_row)
.take(size)
.map(|v| match v {
Some(a) => Value {
value: UntaggedValue::Primitive((a).into()),
tag: Tag::default(),
},
None => Value {
value: UntaggedValue::Primitive(Primitive::Nothing),
tag: Tag::default(),
},
})
.collect::<Vec<Value>>();
Ok(Column::new(casted.name().into(), values))
}
DataType::Utf8 => {
let casted = series.utf8().map_err(|e| {
ShellError::labeled_error(
"Casting error",
format!("casting error: {}", e),
Span::default(),
)
})?;
let values = casted
.into_iter()
.skip(from_row)
.take(size)
.map(|v| match v {
Some(a) => Value {
value: UntaggedValue::Primitive((a).into()),
tag: Tag::default(),
},
None => Value {
value: UntaggedValue::Primitive(Primitive::Nothing),
tag: Tag::default(),
},
})
.collect::<Vec<Value>>();
Ok(Column::new(casted.name().into(), values))
}
DataType::Object(_) => {
let casted = series
.as_any()
.downcast_ref::<ChunkedArray<ObjectType<Value>>>();
match casted {
None => Err(ShellError::labeled_error(
"Format not supported",
"Value not supported for conversion",
Tag::unknown(),
)),
Some(ca) => {
let values = ca
.into_iter()
.skip(from_row)
.take(size)
.map(|v| match v {
Some(a) => a.clone(),
None => Value {
value: UntaggedValue::Primitive(Primitive::Nothing),
tag: Tag::default(),
},
})
.collect::<Vec<Value>>();
Ok(Column::new(ca.name().into(), values))
}
}
}
DataType::Date => {
let casted = series.date().map_err(|e| {
ShellError::labeled_error(
"Casting error",
format!("casting error: {}", e),
Span::default(),
)
})?;
let values = casted
.into_iter()
.skip(from_row)
.take(size)
.map(|v| match v {
Some(a) => {
                        // elapsed time in days since 1970-01-01
let seconds = a as i64 * SECS_PER_DAY;
let naive_datetime = NaiveDateTime::from_timestamp(seconds, 0);
                        // Zero offset, i.e. UTC
let offset = FixedOffset::east(0);
let datetime = DateTime::<FixedOffset>::from_utc(naive_datetime, offset);
Value {
value: UntaggedValue::Primitive(Primitive::Date(datetime)),
tag: Tag::default(),
}
}
None => Value {
value: UntaggedValue::Primitive(Primitive::Nothing),
tag: Tag::default(),
},
})
.collect::<Vec<Value>>();
Ok(Column::new(casted.name().into(), values))
}
DataType::Datetime => {
let casted = series.datetime().map_err(|e| {
ShellError::labeled_error(
"Casting error",
format!("casting error: {}", e),
Span::default(),
)
})?;
let values = casted
.into_iter()
.skip(from_row)
.take(size)
.map(|v| match v {
Some(a) => {
// elapsed time in milliseconds since 1970-01-01
let seconds = a / 1000;
let naive_datetime = NaiveDateTime::from_timestamp(seconds, 0);
                        // Zero offset, i.e. UTC
let offset = FixedOffset::east(0);
let datetime = DateTime::<FixedOffset>::from_utc(naive_datetime, offset);
Value {
value: UntaggedValue::Primitive(Primitive::Date(datetime)),
tag: Tag::default(),
}
}
None => Value {
value: UntaggedValue::Primitive(Primitive::Nothing),
tag: Tag::default(),
},
})
.collect::<Vec<Value>>();
Ok(Column::new(casted.name().into(), values))
}
DataType::Time => {
let casted = series.time().map_err(|e| {
ShellError::labeled_error(
"Casting error",
format!("casting error: {}", e),
Span::default(),
)
})?;
let values = casted
.into_iter()
.skip(from_row)
.take(size)
.map(|v| match v {
Some(nanoseconds) => {
let untagged = if let Some(bigint) = BigInt::from_i64(nanoseconds) {
UntaggedValue::Primitive(Primitive::Duration(bigint))
} else {
unreachable!("Internal error: protocol did not use compatible decimal")
};
Value {
value: untagged,
tag: Tag::default(),
}
}
None => Value {
value: UntaggedValue::Primitive(Primitive::Nothing),
tag: Tag::default(),
},
})
.collect::<Vec<Value>>();
Ok(Column::new(casted.name().into(), values))
}
e => Err(ShellError::labeled_error(
"Format not supported",
format!("Value not supported for conversion: {}", e),
Tag::unknown(),
)),
}
}
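// A small usage sketch with a made-up series: slice two rows out of an i64
// Series and convert them into this module's Column representation.
#[cfg(test)]
mod create_column_sketch {
    use super::*;

    #[test]
    fn converts_an_i64_series_slice() -> Result<(), ShellError> {
        let series = Series::new("n", &[1i64, 2, 3, 4]);
        // Rows 1 and 2 (exclusive of 3) become a two-value column named "n".
        let column = create_column(&series, 1, 3)?;
        assert_eq!(column.name(), "n");
        assert_eq!(column.iter().count(), 2);
        Ok(())
    }
}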
fn column_from_casted<T>(casted: &ChunkedArray<T>, from_row: usize, size: usize) -> Column
where
T: PolarsNumericType,
T::Native: Into<Primitive>,
{
let values = casted
.into_iter()
.skip(from_row)
.take(size)
.map(|v| match v {
Some(a) => Value {
value: UntaggedValue::Primitive((a).into()),
tag: Tag::default(),
},
None => Value {
value: UntaggedValue::Primitive(Primitive::Nothing),
tag: Tag::default(),
},
})
.collect::<Vec<Value>>();
Column::new(casted.name().into(), values)
}
// Adds a separator row to the vector of values, using the dataframe's column
// names to build the row of "..." indicators.
pub fn add_separator(values: &mut Vec<Value>, df: &DataFrame) {
let column_names = df.get_column_names();
let mut dictionary = Dictionary::default();
for name in column_names {
let indicator = Value {
value: UntaggedValue::Primitive(Primitive::String("...".to_string())),
tag: Tag::unknown(),
};
dictionary.insert(name.to_string(), indicator);
}
let extra_column = Value {
value: UntaggedValue::Row(dictionary),
tag: Tag::unknown(),
};
values.push(extra_column);
}
// Inserts the values found in an UntaggedValue::Row.
// Every entry in the dictionary is checked to verify that the column's
// values share the same type.
pub fn insert_row(column_values: &mut ColumnMap, dictionary: Dictionary) -> Result<(), ShellError> {
for (key, value) in dictionary.entries {
insert_value(value, key, column_values)?;
}
Ok(())
}
// Inserts the values found in an UntaggedValue::Table.
// Every entry in the table is checked to verify that the column's values
// share the same type.
// Column names are the enumerated indices of the values.
pub fn insert_table(column_values: &mut ColumnMap, table: Vec<Value>) -> Result<(), ShellError> {
for (index, value) in table.into_iter().enumerate() {
let key = index.to_string();
insert_value(value, key, column_values)?;
}
Ok(())
}
pub fn insert_value(
value: Value,
key: String,
column_values: &mut ColumnMap,
) -> Result<(), ShellError> {
let col_val = match column_values.entry(key.clone()) {
Entry::Vacant(entry) => entry.insert(TypedColumn::new_empty(key)),
Entry::Occupied(entry) => entry.into_mut(),
};
    // Check that the value's type matches the type of the previous
    // value in the column
if col_val.values.is_empty() {
match &value.value {
UntaggedValue::Primitive(Primitive::Int(_)) => {
col_val.column_type = Some(InputType::Integer);
}
UntaggedValue::Primitive(Primitive::Decimal(_)) => {
col_val.column_type = Some(InputType::Decimal);
}
UntaggedValue::Primitive(Primitive::String(_)) => {
col_val.column_type = Some(InputType::String);
}
UntaggedValue::Primitive(Primitive::Boolean(_)) => {
col_val.column_type = Some(InputType::Boolean);
}
UntaggedValue::Primitive(Primitive::Date(_)) => {
col_val.column_type = Some(InputType::Date);
}
UntaggedValue::Primitive(Primitive::Duration(_)) => {
col_val.column_type = Some(InputType::Duration);
}
_ => col_val.column_type = Some(InputType::Object),
}
col_val.values.push(value);
} else {
let prev_value = &col_val.values[col_val.values.len() - 1];
match (&prev_value.value, &value.value) {
(
UntaggedValue::Primitive(Primitive::Int(_)),
UntaggedValue::Primitive(Primitive::Int(_)),
)
| (
UntaggedValue::Primitive(Primitive::Decimal(_)),
UntaggedValue::Primitive(Primitive::Decimal(_)),
)
| (
UntaggedValue::Primitive(Primitive::String(_)),
UntaggedValue::Primitive(Primitive::String(_)),
)
| (
UntaggedValue::Primitive(Primitive::Boolean(_)),
UntaggedValue::Primitive(Primitive::Boolean(_)),
)
| (
UntaggedValue::Primitive(Primitive::Date(_)),
UntaggedValue::Primitive(Primitive::Date(_)),
)
| (
UntaggedValue::Primitive(Primitive::Duration(_)),
UntaggedValue::Primitive(Primitive::Duration(_)),
) => col_val.values.push(value),
_ => {
col_val.column_type = Some(InputType::Object);
col_val.values.push(value);
}
}
}
Ok(())
}
// The ColumnMap holds the parsed data from the stream input.
// Each column is converted into a Series whose concrete type depends on the
// input type that was found, and those Series initialize the dataframe.
pub fn from_parsed_columns(
column_values: ColumnMap,
span: &Span,
) -> Result<NuDataFrame, ShellError> {
let mut df_series: Vec<Series> = Vec::new();
for (name, column) in column_values {
if let Some(column_type) = &column.column_type {
match column_type {
InputType::Decimal => {
let series_values: Result<Vec<_>, _> =
column.values.iter().map(|v| v.as_f64()).collect();
let series = Series::new(&name, series_values?);
df_series.push(series)
}
InputType::Integer => {
let series_values: Result<Vec<_>, _> =
column.values.iter().map(|v| v.as_i64()).collect();
let series = Series::new(&name, series_values?);
df_series.push(series)
}
InputType::String => {
let series_values: Result<Vec<_>, _> =
column.values.iter().map(|v| v.as_string()).collect();
let series = Series::new(&name, series_values?);
df_series.push(series)
}
InputType::Boolean => {
let series_values: Result<Vec<_>, _> =
column.values.iter().map(|v| v.as_bool()).collect();
let series = Series::new(&name, series_values?);
df_series.push(series)
}
InputType::Object => {
let mut builder =
ObjectChunkedBuilder::<Value>::new(&name, column.values.len());
for v in &column.values {
builder.append_value(v.clone());
}
let res = builder.finish();
df_series.push(res.into_series())
}
InputType::Date => {
let it = column.values.iter().map(|v| {
if let UntaggedValue::Primitive(Primitive::Date(date)) = &v.value {
Some(date.timestamp_millis())
} else {
None
}
});
let res: DatetimeChunked =
ChunkedArray::<Int64Type>::new_from_opt_iter(&name, it).into();
df_series.push(res.into_series())
}
InputType::Duration => {
let it = column.values.iter().map(|v| {
if let UntaggedValue::Primitive(Primitive::Duration(duration)) = &v.value {
Some(duration.to_i64().expect("Not expecting NAN in duration"))
} else {
None
}
});
let res = ChunkedArray::<Int64Type>::new_from_opt_iter(&name, it);
df_series.push(res.into_series())
}
}
}
}
let df = DataFrame::new(df_series);
match df {
Ok(df) => Ok(NuDataFrame::new(df)),
Err(e) => Err(ShellError::labeled_error(
"Error while creating dataframe",
e.to_string(),
span,
)),
}
}
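// A minimal end-to-end sketch of the intended flow through this module, with
// made-up input values: rows arrive as dictionaries, are accumulated into a
// ColumnMap via `insert_row`, and are finally turned into a NuDataFrame by
// `from_parsed_columns`.
#[cfg(test)]
mod column_map_flow_sketch {
    use super::*;

    #[test]
    fn builds_a_dataframe_from_rows() -> Result<(), ShellError> {
        let mut columns: ColumnMap = IndexMap::new();
        for n in 0..3i64 {
            let mut row = Dictionary::default();
            row.insert(
                "n".to_string(),
                Value {
                    value: UntaggedValue::Primitive(Primitive::Int(n.into())),
                    tag: Tag::unknown(),
                },
            );
            insert_row(&mut columns, row)?;
        }
        let _df = from_parsed_columns(columns, &Span::default())?;
        Ok(())
    }
}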
| 33.347432 | 100 | 0.459594 |
26344b67b94f052011e0163e9eba016ce05475ea | 21,948 | // Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
//! Per-connection configuration parameters and state.
#![warn(missing_docs)]
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::mem;
use chrono::{DateTime, Utc};
use derivative::Derivative;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver};
use tokio::sync::OwnedMutexGuard;
use expr::GlobalId;
use pgrepr::Format;
use repr::{Datum, Diff, Row, ScalarType, Timestamp};
use sql::ast::{Raw, Statement};
use sql::plan::{Params, PlanContext, StatementDesc};
use crate::error::CoordError;
mod vars;
pub use self::vars::{Var, Vars};
const DUMMY_CONNECTION_ID: u32 = 0;
/// A session holds per-connection state.
#[derive(Debug)]
pub struct Session {
conn_id: u32,
prepared_statements: HashMap<String, PreparedStatement>,
portals: HashMap<String, Portal>,
transaction: TransactionStatus,
pcx: Option<PlanContext>,
user: String,
vars: Vars,
drop_sinks: Vec<GlobalId>,
}
impl Session {
/// Creates a new session for the specified connection ID.
pub fn new(conn_id: u32, user: String) -> Session {
assert_ne!(conn_id, DUMMY_CONNECTION_ID);
Self::new_internal(conn_id, user)
}
/// Creates a new dummy session.
///
/// Dummy sessions are intended for use when executing queries on behalf of
/// the system itself, rather than on behalf of a user.
pub fn dummy() -> Session {
Self::new_internal(DUMMY_CONNECTION_ID, "mz_system".into())
}
fn new_internal(conn_id: u32, user: String) -> Session {
Session {
conn_id,
transaction: TransactionStatus::Default,
pcx: None,
prepared_statements: HashMap::new(),
portals: HashMap::new(),
user,
vars: Vars::default(),
drop_sinks: vec![],
}
}
/// Returns the connection ID associated with the session.
pub fn conn_id(&self) -> u32 {
self.conn_id
}
/// Returns the current transaction's PlanContext. Panics if there is not a
/// current transaction.
pub fn pcx(&self) -> &PlanContext {
&self.transaction().inner().unwrap().pcx
}
/// Starts an explicit transaction, or changes an implicit to an explicit
/// transaction.
pub fn start_transaction(mut self, wall_time: DateTime<Utc>) -> Self {
match self.transaction {
TransactionStatus::Default | TransactionStatus::Started(_) => {
self.transaction = TransactionStatus::InTransaction(Transaction {
pcx: PlanContext::new(wall_time),
ops: TransactionOps::None,
write_lock_guard: None,
});
}
TransactionStatus::InTransactionImplicit(txn) => {
self.transaction = TransactionStatus::InTransaction(txn);
}
TransactionStatus::InTransaction(_) => {}
TransactionStatus::Failed(_) => unreachable!(),
};
self
}
/// Starts either a single statement or implicit transaction based on the
/// number of statements, but only if no transaction has been started already.
pub fn start_transaction_implicit(mut self, wall_time: DateTime<Utc>, stmts: usize) -> Self {
if let TransactionStatus::Default = self.transaction {
let txn = Transaction {
pcx: PlanContext::new(wall_time),
ops: TransactionOps::None,
write_lock_guard: None,
};
match stmts {
1 => self.transaction = TransactionStatus::Started(txn),
n if n > 1 => self.transaction = TransactionStatus::InTransactionImplicit(txn),
_ => {}
}
}
self
}
/// Clears a transaction, setting its state to Default and destroying all
/// portals. Returned are:
/// - sinks that were started in this transaction and need to be dropped
/// - the cleared transaction so its operations can be handled
///
/// The [Postgres protocol docs](https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-FLOW-EXT-QUERY) specify:
/// > a named portal object lasts till the end of the current transaction
/// and
/// > An unnamed portal is destroyed at the end of the transaction
#[must_use]
pub fn clear_transaction(&mut self) -> (Vec<GlobalId>, TransactionStatus) {
self.portals.clear();
self.pcx = None;
let drop_sinks = mem::take(&mut self.drop_sinks);
let txn = mem::take(&mut self.transaction);
(drop_sinks, txn)
}
/// Marks the current transaction as failed.
pub fn fail_transaction(mut self) -> Self {
match self.transaction {
TransactionStatus::Default => unreachable!(),
TransactionStatus::Started(txn)
| TransactionStatus::InTransactionImplicit(txn)
| TransactionStatus::InTransaction(txn) => {
self.transaction = TransactionStatus::Failed(txn);
}
TransactionStatus::Failed(_) => {}
};
self
}
/// Returns the current transaction status.
pub fn transaction(&self) -> &TransactionStatus {
&self.transaction
}
/// Adds operations to the current transaction. An error is produced if they
/// cannot be merged (i.e., a read cannot be merged to an insert).
pub fn add_transaction_ops(&mut self, add_ops: TransactionOps) -> Result<(), CoordError> {
match &mut self.transaction {
TransactionStatus::Started(Transaction { ops, .. })
| TransactionStatus::InTransaction(Transaction { ops, .. })
| TransactionStatus::InTransactionImplicit(Transaction { ops, .. }) => match ops {
TransactionOps::None => *ops = add_ops,
TransactionOps::Peeks(txn_ts) => match add_ops {
TransactionOps::Peeks(add_ts) => {
assert_eq!(*txn_ts, add_ts);
}
_ => return Err(CoordError::ReadOnlyTransaction),
},
TransactionOps::Tail => return Err(CoordError::TailOnlyTransaction),
TransactionOps::Writes(txn_writes) => match add_ops {
TransactionOps::Writes(mut add_writes) => {
txn_writes.append(&mut add_writes);
}
_ => {
return Err(CoordError::WriteOnlyTransaction);
}
},
},
TransactionStatus::Default | TransactionStatus::Failed(_) => {
unreachable!()
}
}
Ok(())
}
/// Adds a sink that will need to be dropped when the current transaction is
/// cleared.
pub fn add_drop_sink(&mut self, name: GlobalId) {
self.drop_sinks.push(name);
}
/// Assumes an active transaction. Returns its read timestamp. Errors if not
/// a read transaction. Calls get_ts to get a timestamp if the transaction
/// doesn't have an operation yet, converting the transaction to a read.
pub fn get_transaction_timestamp<F: FnMut() -> Result<Timestamp, CoordError>>(
&mut self,
mut get_ts: F,
) -> Result<Timestamp, CoordError> {
// If the transaction already has a peek timestamp, use it. Otherwise generate
// one. We generate one even though we could check here that the transaction
// isn't in some other conflicting state because we want all of that logic to
// reside in add_transaction_ops.
let ts = match self.transaction.inner() {
Some(Transaction {
pcx: _,
ops: TransactionOps::Peeks(ts),
write_lock_guard: _,
}) => *ts,
_ => get_ts()?,
};
self.add_transaction_ops(TransactionOps::Peeks(ts))?;
Ok(ts)
}
/// Registers the prepared statement under `name`.
pub fn set_prepared_statement(&mut self, name: String, statement: PreparedStatement) {
self.prepared_statements.insert(name, statement);
}
/// Removes the prepared statement associated with `name`.
///
/// Returns whether a statement previously existed.
pub fn remove_prepared_statement(&mut self, name: &str) -> bool {
self.prepared_statements.remove(name).is_some()
}
/// Removes all prepared statements.
pub fn remove_all_prepared_statements(&mut self) {
self.prepared_statements.clear();
}
/// Retrieves the prepared statement associated with `name`.
///
/// This is unverified and could be incorrect if the underlying catalog has
/// changed.
pub fn get_prepared_statement_unverified(&self, name: &str) -> Option<&PreparedStatement> {
self.prepared_statements.get(name)
}
/// Retrieves the prepared statement associated with `name`.
///
/// This is unverified and could be incorrect if the underlying catalog has
/// changed.
pub fn get_prepared_statement_mut_unverified(
&mut self,
name: &str,
) -> Option<&mut PreparedStatement> {
self.prepared_statements.get_mut(name)
}
/// Returns the prepared statements for the session.
pub fn prepared_statements(&self) -> &HashMap<String, PreparedStatement> {
&self.prepared_statements
}
/// Binds the specified portal to the specified prepared statement.
///
/// If the prepared statement contains parameters, the values and types of
/// those parameters must be provided in `params`. It is the caller's
/// responsibility to ensure that the correct number of parameters is
/// provided.
///
    /// The `result_formats` parameter sets the desired format of the results,
/// and is stored on the portal.
pub fn set_portal(
&mut self,
portal_name: String,
desc: StatementDesc,
stmt: Option<Statement<Raw>>,
params: Vec<(Datum, ScalarType)>,
result_formats: Vec<pgrepr::Format>,
) -> Result<(), CoordError> {
// The empty portal can be silently replaced.
if !portal_name.is_empty() && self.portals.contains_key(&portal_name) {
return Err(CoordError::DuplicateCursor(portal_name));
}
self.portals.insert(
portal_name,
Portal {
stmt,
desc,
parameters: Params {
datums: Row::pack(params.iter().map(|(d, _t)| d)),
types: params.into_iter().map(|(_d, t)| t).collect(),
},
result_formats: result_formats.into_iter().map(Into::into).collect(),
state: PortalState::NotStarted,
},
);
Ok(())
}
/// Removes the specified portal.
///
/// If there is no such portal, this method does nothing. Returns whether that portal existed.
pub fn remove_portal(&mut self, portal_name: &str) -> bool {
self.portals.remove(portal_name).is_some()
}
/// Retrieves a reference to the specified portal.
///
/// If there is no such portal, returns `None`.
pub fn get_portal(&self, portal_name: &str) -> Option<&Portal> {
self.portals.get(portal_name)
}
/// Retrieves a mutable reference to the specified portal.
///
/// If there is no such portal, returns `None`.
pub fn get_portal_mut(&mut self, portal_name: &str) -> Option<&mut Portal> {
self.portals.get_mut(portal_name)
}
/// Creates and installs a new portal.
pub fn create_new_portal(
&mut self,
stmt: Option<Statement<Raw>>,
desc: StatementDesc,
parameters: Params,
result_formats: Vec<Format>,
) -> Result<String, CoordError> {
// See: https://github.com/postgres/postgres/blob/84f5c2908dad81e8622b0406beea580e40bb03ac/src/backend/utils/mmgr/portalmem.c#L234
for i in 0usize.. {
let name = format!("<unnamed portal {}>", i);
match self.portals.entry(name.clone()) {
Entry::Occupied(_) => continue,
Entry::Vacant(entry) => {
entry.insert(Portal {
stmt,
desc,
parameters,
result_formats,
state: PortalState::NotStarted,
});
return Ok(name);
}
}
}
coord_bail!("unable to create a new portal");
}
/// Resets the session to its initial state. Returns sinks that need to be
/// dropped.
pub fn reset(&mut self) -> Vec<GlobalId> {
let (drop_sinks, _) = self.clear_transaction();
self.prepared_statements.clear();
self.vars = Vars::default();
drop_sinks
}
/// Returns the name of the user who owns this session.
pub fn user(&self) -> &str {
&self.user
}
/// Returns a reference to the variables in this session.
pub fn vars(&self) -> &Vars {
&self.vars
}
/// Returns a mutable reference to the variables in this session.
pub fn vars_mut(&mut self) -> &mut Vars {
&mut self.vars
}
/// Grants the coordinator's write lock guard to this session's inner
/// transaction.
///
/// # Panics
/// If the inner transaction is idle. See
/// [`TransactionStatus::grant_write_lock`].
pub fn grant_write_lock(&mut self, guard: OwnedMutexGuard<()>) {
self.transaction.grant_write_lock(guard);
}
/// Returns whether or not this session currently holds the write lock.
pub fn has_write_lock(&self) -> bool {
match self.transaction.inner() {
None => false,
Some(txn) => txn.write_lock_guard.is_some(),
}
}
}
/// A prepared statement.
#[derive(Debug)]
pub struct PreparedStatement {
sql: Option<Statement<Raw>>,
desc: StatementDesc,
/// The most recent catalog revision that has verified this statement.
pub catalog_revision: u64,
}
impl PreparedStatement {
/// Constructs a new prepared statement.
pub fn new(
sql: Option<Statement<Raw>>,
desc: StatementDesc,
catalog_revision: u64,
) -> PreparedStatement {
PreparedStatement {
sql,
desc,
catalog_revision,
}
}
/// Returns the raw SQL string associated with this prepared statement,
/// if the prepared statement was not the empty query.
pub fn sql(&self) -> Option<&Statement<Raw>> {
self.sql.as_ref()
}
/// Returns the description of the prepared statement.
pub fn desc(&self) -> &StatementDesc {
&self.desc
}
}
/// A portal represents the execution state of a running or runnable query.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct Portal {
/// The statement that is bound to this portal.
pub stmt: Option<Statement<Raw>>,
/// The statement description.
pub desc: StatementDesc,
/// The bound values for the parameters in the prepared statement, if any.
pub parameters: Params,
/// The desired output format for each column in the result set.
pub result_formats: Vec<pgrepr::Format>,
/// The execution state of the portal.
#[derivative(Debug = "ignore")]
pub state: PortalState,
}
/// Execution states of a portal.
pub enum PortalState {
/// Portal not yet started.
NotStarted,
/// Portal is a rows-returning statement in progress with 0 or more rows
/// remaining.
InProgress(Option<InProgressRows>),
/// Portal has completed and should not be re-executed. If the optional string
/// is present, it is returned as a CommandComplete tag, otherwise an error
/// is sent.
Completed(Option<String>),
}
/// State of an in-progress, rows-returning portal.
pub struct InProgressRows {
/// The current batch of rows.
pub current: Option<Vec<Row>>,
/// A stream from which to fetch more row batches.
pub remaining: RowBatchStream,
}
impl InProgressRows {
/// Creates a new InProgressRows from a batch stream.
pub fn new(remaining: RowBatchStream) -> Self {
Self {
current: None,
remaining,
}
}
/// Creates a new InProgressRows from a single batch of rows.
pub fn single_batch(rows: Vec<Row>) -> Self {
let (_tx, rx) = unbounded_channel();
Self {
current: Some(rows),
remaining: rx,
}
}
}
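// A tiny illustration (with made-up row contents) of the two constructors:
// `single_batch` holds a materialized batch eagerly, while `new` defers to a
// channel of batches.
#[cfg(test)]
mod in_progress_rows_sketch {
    use super::*;

    #[test]
    fn single_batch_holds_rows_eagerly() {
        let rows = vec![Row::pack(Some(Datum::Int64(1)))];
        let in_progress = InProgressRows::single_batch(rows);
        assert!(in_progress.current.is_some());
    }
}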
/// A channel of batched rows.
pub type RowBatchStream = UnboundedReceiver<Vec<Row>>;
/// The transaction status of a session.
///
/// PostgreSQL's transaction states are in backend/access/transam/xact.c.
#[derive(Debug)]
pub enum TransactionStatus {
/// Idle. Matches `TBLOCK_DEFAULT`.
Default,
/// Running a single-query transaction. Matches `TBLOCK_STARTED`.
Started(Transaction),
/// Currently in a transaction issued from a `BEGIN`. Matches `TBLOCK_INPROGRESS`.
InTransaction(Transaction),
    /// Currently in an implicit transaction started from a multi-statement
    /// query with more than one statement. Matches `TBLOCK_IMPLICIT_INPROGRESS`.
InTransactionImplicit(Transaction),
/// In a failed transaction that was started explicitly (i.e., previously
/// InTransaction). We do not use Failed for implicit transactions because
/// those cleanup after themselves. Matches `TBLOCK_ABORT`.
Failed(Transaction),
}
impl TransactionStatus {
/// Extracts the inner transaction ops if not failed.
pub fn into_ops(self) -> Option<TransactionOps> {
match self {
TransactionStatus::Default | TransactionStatus::Failed(_) => None,
TransactionStatus::Started(txn)
| TransactionStatus::InTransaction(txn)
| TransactionStatus::InTransactionImplicit(txn) => Some(txn.ops),
}
}
/// Exposes the inner transaction.
pub fn inner(&self) -> Option<&Transaction> {
match self {
TransactionStatus::Default => None,
TransactionStatus::Started(txn)
| TransactionStatus::InTransaction(txn)
| TransactionStatus::InTransactionImplicit(txn)
| TransactionStatus::Failed(txn) => Some(txn),
}
}
/// Expresses whether or not the transaction was implicitly started.
/// However, its negation does not imply explicitly started.
pub fn is_implicit(&self) -> bool {
match self {
TransactionStatus::Started(_) | TransactionStatus::InTransactionImplicit(_) => true,
TransactionStatus::Default
| TransactionStatus::InTransaction(_)
| TransactionStatus::Failed(_) => false,
}
}
/// Grants the write lock to the inner transaction.
///
/// # Panics
/// If `self` is `TransactionStatus::Default`, which indicates that the
/// transaction is idle, which is not appropriate to assign the
/// coordinator's write lock to.
pub fn grant_write_lock(&mut self, guard: OwnedMutexGuard<()>) {
match self {
TransactionStatus::Default => panic!("cannot grant write lock to txn not yet started"),
TransactionStatus::Started(txn)
| TransactionStatus::InTransaction(txn)
| TransactionStatus::InTransactionImplicit(txn)
| TransactionStatus::Failed(txn) => txn.grant_write_lock(guard),
}
}
}
impl Default for TransactionStatus {
fn default() -> Self {
TransactionStatus::Default
}
}
/// State data for transactions.
#[derive(Debug)]
pub struct Transaction {
/// Plan context.
pub pcx: PlanContext,
/// Transaction operations.
pub ops: TransactionOps,
/// Holds the coordinator's write lock.
write_lock_guard: Option<OwnedMutexGuard<()>>,
}
impl Transaction {
/// Grants the write lock to this transaction for the remainder of its lifetime.
fn grant_write_lock(&mut self, guard: OwnedMutexGuard<()>) {
self.write_lock_guard = Some(guard);
}
}
/// The type of operation being performed by the transaction.
///
/// This is needed because we currently do not allow mixing reads and writes in
/// a transaction. Use this to record what we have done, and what may need to
/// happen at commit.
#[derive(Debug, Clone, PartialEq)]
pub enum TransactionOps {
/// The transaction has been initiated, but no statement has yet been executed
/// in it.
None,
/// This transaction has had a peek (`SELECT`, `TAIL`) and must only do other peeks.
Peeks(Timestamp),
/// This transaction has done a TAIL and must do nothing else.
Tail,
/// This transaction has had a write (`INSERT`, `UPDATE`, `DELETE`) and must only do
/// other writes.
Writes(Vec<WriteOp>),
}
/// An `INSERT` waiting to be committed.
#[derive(Debug, Clone, PartialEq)]
pub struct WriteOp {
/// The target table.
pub id: GlobalId,
/// The data rows.
pub rows: Vec<(Row, Diff)>,
}
/// The action to take during end_transaction.
#[derive(Debug, PartialEq, Eq)]
pub enum EndTransactionAction {
/// Commit the transaction.
Commit,
/// Rollback the transaction.
Rollback,
}
impl EndTransactionAction {
/// Returns the pgwire tag for this action.
pub fn tag(&self) -> &'static str {
match self {
EndTransactionAction::Commit => "COMMIT",
EndTransactionAction::Rollback => "ROLLBACK",
}
}
}
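// A minimal lifecycle sketch (not part of the original module): a dummy
// session starts an explicit transaction, records a read timestamp via a
// made-up timestamp source, and is then cleared.
#[cfg(test)]
mod session_lifecycle_sketch {
    use super::*;

    #[test]
    fn explicit_transaction_round_trip() -> Result<(), CoordError> {
        let mut session = Session::dummy().start_transaction(Utc::now());
        let ts = session.get_transaction_timestamp(|| Ok(42))?;
        assert_eq!(ts, 42);
        let (drop_sinks, _txn) = session.clear_transaction();
        assert!(drop_sinks.is_empty());
        Ok(())
    }
}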
| 34.782884 | 138 | 0.617551 |
9cb880267660fd943ab96cd236473aa7b478313b | 7,555 | use dashmap::{mapref::entry::Entry::Occupied, DashMap};
use safecoin_sdk::{pubkey::Pubkey, timing::AtomicInterval};
use std::{
collections::HashSet,
fmt::Debug,
sync::{
atomic::{AtomicU64, Ordering},
RwLock,
},
};
// The only case where an inner key should map to more than one outer key is
// when the key had different account data for the indexed key across
// different slots. As this is rare, it should be ok to use a Vec here over a
// HashSet, even though we are running some key existence checks.
pub type SecondaryReverseIndexEntry = RwLock<Vec<Pubkey>>;
pub trait SecondaryIndexEntry: Debug {
fn insert_if_not_exists(&self, key: &Pubkey, inner_keys_count: &AtomicU64);
// Removes a value from the set. Returns whether the value was present in the set.
fn remove_inner_key(&self, key: &Pubkey) -> bool;
fn is_empty(&self) -> bool;
fn keys(&self) -> Vec<Pubkey>;
fn len(&self) -> usize;
}
#[derive(Debug, Default)]
pub struct SecondaryIndexStats {
last_report: AtomicInterval,
num_inner_keys: AtomicU64,
}
#[derive(Debug, Default)]
pub struct DashMapSecondaryIndexEntry {
account_keys: DashMap<Pubkey, ()>,
}
impl SecondaryIndexEntry for DashMapSecondaryIndexEntry {
fn insert_if_not_exists(&self, key: &Pubkey, inner_keys_count: &AtomicU64) {
if self.account_keys.get(key).is_none() {
self.account_keys.entry(*key).or_insert_with(|| {
inner_keys_count.fetch_add(1, Ordering::Relaxed);
});
}
}
fn remove_inner_key(&self, key: &Pubkey) -> bool {
self.account_keys.remove(key).is_some()
}
fn is_empty(&self) -> bool {
self.account_keys.is_empty()
}
fn keys(&self) -> Vec<Pubkey> {
self.account_keys
.iter()
.map(|entry_ref| *entry_ref.key())
.collect()
}
fn len(&self) -> usize {
self.account_keys.len()
}
}
#[derive(Debug, Default)]
pub struct RwLockSecondaryIndexEntry {
account_keys: RwLock<HashSet<Pubkey>>,
}
impl SecondaryIndexEntry for RwLockSecondaryIndexEntry {
fn insert_if_not_exists(&self, key: &Pubkey, inner_keys_count: &AtomicU64) {
let exists = self.account_keys.read().unwrap().contains(key);
if !exists {
let mut w_account_keys = self.account_keys.write().unwrap();
w_account_keys.insert(*key);
inner_keys_count.fetch_add(1, Ordering::Relaxed);
};
}
fn remove_inner_key(&self, key: &Pubkey) -> bool {
self.account_keys.write().unwrap().remove(key)
}
fn is_empty(&self) -> bool {
self.account_keys.read().unwrap().is_empty()
}
fn keys(&self) -> Vec<Pubkey> {
self.account_keys.read().unwrap().iter().cloned().collect()
}
fn len(&self) -> usize {
self.account_keys.read().unwrap().len()
}
}
#[derive(Debug, Default)]
pub struct SecondaryIndex<SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send> {
metrics_name: &'static str,
// Map from index keys to index values
pub index: DashMap<Pubkey, SecondaryIndexEntryType>,
pub reverse_index: DashMap<Pubkey, SecondaryReverseIndexEntry>,
stats: SecondaryIndexStats,
}
impl<SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send>
SecondaryIndex<SecondaryIndexEntryType>
{
pub fn new(metrics_name: &'static str) -> Self {
Self {
metrics_name,
..Self::default()
}
}
pub fn insert(&self, key: &Pubkey, inner_key: &Pubkey) {
{
let pubkeys_map = self.index.get(key).unwrap_or_else(|| {
self.index
.entry(*key)
.or_insert(SecondaryIndexEntryType::default())
.downgrade()
});
pubkeys_map.insert_if_not_exists(inner_key, &self.stats.num_inner_keys);
}
let outer_keys = self.reverse_index.get(inner_key).unwrap_or_else(|| {
self.reverse_index
.entry(*inner_key)
.or_insert(RwLock::new(Vec::with_capacity(1)))
.downgrade()
});
let should_insert = !outer_keys.read().unwrap().contains(key);
if should_insert {
let mut w_outer_keys = outer_keys.write().unwrap();
if !w_outer_keys.contains(key) {
w_outer_keys.push(*key);
}
}
if self.stats.last_report.should_update(1000) {
datapoint_info!(
self.metrics_name,
("num_secondary_keys", self.index.len() as i64, i64),
(
"num_inner_keys",
self.stats.num_inner_keys.load(Ordering::Relaxed) as i64,
i64
),
(
"num_reverse_index_keys",
self.reverse_index.len() as i64,
i64
),
);
}
}
// Only safe to call from `remove_by_inner_key()` due to asserts
fn remove_index_entries(&self, outer_key: &Pubkey, removed_inner_key: &Pubkey) {
let is_outer_key_empty = {
let inner_key_map = self
.index
.get_mut(outer_key)
.expect("If we're removing a key, then it must have an entry in the map");
// If we deleted a pubkey from the reverse_index, then the corresponding entry
// better exist in this index as well or the two indexes are out of sync!
assert!(inner_key_map.value().remove_inner_key(removed_inner_key));
inner_key_map.is_empty()
};
// Delete the `key` if the set of inner keys is empty
if is_outer_key_empty {
// Other threads may have interleaved writes to this `key`,
// so double-check again for its emptiness
if let Occupied(key_entry) = self.index.entry(*outer_key) {
if key_entry.get().is_empty() {
key_entry.remove();
}
}
}
}
pub fn remove_by_inner_key(&self, inner_key: &Pubkey) {
// Save off which keys in `self.index` had slots removed so we can remove them
// after we purge the reverse index
let mut removed_outer_keys: HashSet<Pubkey> = HashSet::new();
// Check if the entry for `inner_key` in the reverse index is empty
// and can be removed
if let Some((_, outer_keys_set)) = self.reverse_index.remove(inner_key) {
for removed_outer_key in outer_keys_set.into_inner().unwrap().into_iter() {
removed_outer_keys.insert(removed_outer_key);
}
}
// Remove this value from those keys
for outer_key in &removed_outer_keys {
self.remove_index_entries(outer_key, inner_key);
}
// Safe to `fetch_sub()` here because a dead key cannot be removed more than once,
// and the `num_inner_keys` must have been incremented by exactly removed_outer_keys.len()
// in previous unique insertions of `inner_key` into `self.index` for each key
// in `removed_outer_keys`
self.stats
.num_inner_keys
.fetch_sub(removed_outer_keys.len() as u64, Ordering::Relaxed);
}
pub fn get(&self, key: &Pubkey) -> Vec<Pubkey> {
if let Some(inner_keys_map) = self.index.get(key) {
inner_keys_map.keys()
} else {
vec![]
}
}
}
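// A minimal usage sketch (not part of the original file): exercises the
// insert / get / remove_by_inner_key round-trip on a DashMap-backed index.
// Assumes `Pubkey::new_unique()` from the SDK; the metrics report inside
// `insert` is harmless in a test context.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[test]
    fn insert_get_remove_round_trip() {
        let index: SecondaryIndex<DashMapSecondaryIndexEntry> =
            SecondaryIndex::new("secondary_index_usage_sketch");
        let outer = Pubkey::new_unique();
        let inner = Pubkey::new_unique();

        // Forward mapping: outer -> {inner}; reverse mapping: inner -> [outer].
        index.insert(&outer, &inner);
        assert_eq!(index.get(&outer), vec![inner]);

        // Removing by inner key cleans up both directions.
        index.remove_by_inner_key(&inner);
        assert!(index.get(&outer).is_empty());
    }
}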
| 33.878924 | 98 | 0.597353 |
fe91c3d15e652964646f085a5a25a86958b6a8b5 | 6,033 | // Copyright 2021, The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use std::io;
use super::{
absolute_height::AbsoluteHeightFilter,
and::AndFilter,
field_eq::FieldEqFilter,
fields_hashed_eq::FieldsHashedEqFilter,
fields_preserved::FieldsPreservedFilter,
identity::IdentityFilter,
not::NotFilter,
or::OrFilter,
output_hash_eq::OutputHashEqFilter,
xor::XorFilter,
};
use crate::covenants::{
byte_codes,
context::CovenantContext,
decoder::CovenantDecodeError,
encoder::CovenentWriteExt,
error::CovenantError,
output_set::OutputSet,
};
pub trait Filter {
fn filter(&self, context: &mut CovenantContext<'_>, output_set: &mut OutputSet<'_>) -> Result<(), CovenantError>;
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum CovenantFilter {
Identity(IdentityFilter),
And(AndFilter),
Or(OrFilter),
Xor(XorFilter),
Not(NotFilter),
OutputHashEq(OutputHashEqFilter),
FieldsPreserved(FieldsPreservedFilter),
FieldEq(FieldEqFilter),
FieldsHashedEq(FieldsHashedEqFilter),
AbsoluteHeight(AbsoluteHeightFilter),
}
impl CovenantFilter {
pub fn is_valid_code(code: u8) -> bool {
byte_codes::is_valid_filter_code(code)
}
pub fn write_to<W: io::Write>(&self, writer: &mut W) -> Result<usize, io::Error> {
writer.write_u8_fixed(self.as_byte_code())
}
fn as_byte_code(&self) -> u8 {
use byte_codes::*;
use CovenantFilter::*;
match self {
Identity(_) => FILTER_IDENTITY,
And(_) => FILTER_AND,
Or(_) => FILTER_OR,
Xor(_) => FILTER_XOR,
Not(_) => FILTER_NOT,
OutputHashEq(_) => FILTER_OUTPUT_HASH_EQ,
FieldsPreserved(_) => FILTER_FIELDS_PRESERVED,
FieldEq(_) => FILTER_FIELD_EQ,
FieldsHashedEq(_) => FILTER_FIELDS_HASHED_EQ,
AbsoluteHeight(_) => FILTER_ABSOLUTE_HEIGHT,
}
}
pub fn try_from_byte_code(code: u8) -> Result<Self, CovenantDecodeError> {
use byte_codes::*;
match code {
FILTER_IDENTITY => Ok(Self::identity()),
FILTER_AND => Ok(Self::and()),
FILTER_OR => Ok(Self::or()),
FILTER_XOR => Ok(Self::xor()),
FILTER_NOT => Ok(Self::not()),
FILTER_OUTPUT_HASH_EQ => Ok(Self::output_hash_eq()),
FILTER_FIELDS_PRESERVED => Ok(Self::fields_preserved()),
FILTER_FIELD_EQ => Ok(Self::field_eq()),
FILTER_FIELDS_HASHED_EQ => Ok(Self::fields_hashed_eq()),
FILTER_ABSOLUTE_HEIGHT => Ok(Self::absolute_height()),
_ => Err(CovenantDecodeError::UnknownFilterByteCode { code }),
}
}
pub fn identity() -> Self {
CovenantFilter::Identity(IdentityFilter)
}
pub fn and() -> Self {
CovenantFilter::And(AndFilter)
}
pub fn or() -> Self {
CovenantFilter::Or(OrFilter)
}
pub fn xor() -> Self {
CovenantFilter::Xor(XorFilter)
}
pub fn not() -> Self {
CovenantFilter::Not(NotFilter)
}
pub fn output_hash_eq() -> Self {
CovenantFilter::OutputHashEq(OutputHashEqFilter)
}
pub fn fields_preserved() -> Self {
CovenantFilter::FieldsPreserved(FieldsPreservedFilter)
}
pub fn field_eq() -> Self {
CovenantFilter::FieldEq(FieldEqFilter)
}
pub fn fields_hashed_eq() -> Self {
CovenantFilter::FieldsHashedEq(FieldsHashedEqFilter)
}
pub fn absolute_height() -> Self {
CovenantFilter::AbsoluteHeight(AbsoluteHeightFilter)
}
}
impl Filter for CovenantFilter {
fn filter(&self, context: &mut CovenantContext<'_>, output_set: &mut OutputSet<'_>) -> Result<(), CovenantError> {
use CovenantFilter::*;
match self {
Identity(identity) => identity.filter(context, output_set),
And(and) => and.filter(context, output_set),
Or(or) => or.filter(context, output_set),
Xor(xor) => xor.filter(context, output_set),
Not(not) => not.filter(context, output_set),
OutputHashEq(output_hash_eq) => output_hash_eq.filter(context, output_set),
FieldsPreserved(fields_preserved) => fields_preserved.filter(context, output_set),
FieldEq(fields_eq) => fields_eq.filter(context, output_set),
FieldsHashedEq(fields_hashed_eq) => fields_hashed_eq.filter(context, output_set),
AbsoluteHeight(abs_height) => abs_height.filter(context, output_set),
}
}
}
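// A minimal round-trip sketch (not from the original file): each filter
// variant encodes to its single byte code via `write_to` and decodes back
// through `try_from_byte_code`. The one-byte assertion assumes
// `write_u8_fixed` emits exactly one byte, as its name suggests.
#[cfg(test)]
mod byte_code_round_trip_sketch {
    use super::*;

    #[test]
    fn encode_then_decode_is_identity() {
        let filters = [
            CovenantFilter::identity(),
            CovenantFilter::and(),
            CovenantFilter::or(),
            CovenantFilter::not(),
            CovenantFilter::absolute_height(),
        ];
        for filter in &filters {
            let mut buf = Vec::new();
            // `write_to` emits the filter's byte code.
            filter.write_to(&mut buf).unwrap();
            assert_eq!(buf.len(), 1);
            assert_eq!(CovenantFilter::try_from_byte_code(buf[0]).unwrap(), *filter);
        }
    }
}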
| 36.343373 | 119 | 0.663683 |
d94d7138aede345a8e0957d3e1943e5af4d5e389 | 2,184 | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use libra_types::crypto_proxies::LedgerInfoWithSignatures;
use libra_types::transaction::Version;
use serde::{Deserialize, Serialize};
use std::fmt;
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)]
/// We're currently considering several types of chunk requests depending on the information
/// available on the requesting side.
pub enum TargetType {
/// The response is built relative to the target (or end of epoch).
TargetLedgerInfo(LedgerInfoWithSignatures),
/// The response is built relative to the highest available LedgerInfo (or end of epoch).
/// The value specifies the timeout in ms to wait for an available response.
/// This "long poll" approach allows an upstream node to add the request to the list of its
/// subscriptions for the duration of a timeout until some new information becomes available.
HighestAvailable { timeout_ms: u64 },
/// The response is built relative to a LedgerInfo at a given version.
Waypoint(Version),
}
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)]
pub struct GetChunkRequest {
/// The response should start with `known_version + 1`.
pub known_version: Version,
/// Epoch the chunk response is supposed to belong to (i.e., epoch of known_version + 1).
pub current_epoch: u64,
/// Max size of a chunk response.
pub limit: u64,
/// The target of the given request.
target: TargetType,
}
impl GetChunkRequest {
pub fn new(known_version: Version, current_epoch: u64, limit: u64, target: TargetType) -> Self {
Self {
known_version,
current_epoch,
limit,
target,
}
}
pub fn target(&self) -> &TargetType {
&self.target
}
}
impl fmt::Display for GetChunkRequest {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"[ChunkRequest: known version: {}, epoch: {}, limit: {}, target: {:?}]",
self.known_version,
self.current_epoch,
self.limit,
self.target(),
)
}
}
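// A construction sketch (not from the original file): build a long-poll
// request for anything newer than version 100 in epoch 2, with a 10 second
// subscription timeout. The values are illustrative.
#[cfg(test)]
mod request_construction_sketch {
    use super::*;

    #[test]
    fn build_long_poll_request() {
        let target = TargetType::HighestAvailable { timeout_ms: 10_000 };
        let req = GetChunkRequest::new(100, 2, 250, target.clone());
        assert_eq!(*req.target(), target);
        // The Display impl above renders a compact, log-friendly summary.
        println!("{}", req);
    }
}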
| 34.666667 | 100 | 0.657967 |
48a53e9f938244277ed98f03d7ca4455b2f1131e | 5,364 | extern crate sdl2;
use crate::ram::RAM;
use crate::cpu::CPU;
use crate::graphics_buffer::GraphicsBuffer;
use crate::speaker::Speaker;
use std::thread::sleep;
use std::time::{Duration, SystemTime};
use sdl2::event::Event;
use sdl2::keyboard::Keycode;
use std::env::args;
mod ram;
mod cpu;
mod byte_register;
mod word_register;
mod graphics_buffer;
mod speaker;
const PIXEL_WIDTH: u32 = 16;
const HEIGHT: u32 = 32*PIXEL_WIDTH;
const WIDTH: u32 = 64*PIXEL_WIDTH;
fn main() {
let args: Vec<String> = args().collect();
    let filename = args.get(1).expect("Usage: chip8 <path-to-rom>");
// backend code
let mut ram = RAM::new();
ram.load_rom(String::from(filename));
let mut cpu = CPU::new(ram);
let mut gfx = GraphicsBuffer::new();
//sdl2 code
    let sdl = sdl2::init().expect("Could not initialize sdl");
    let video_subsystem = sdl.video().expect("Could not initialize video subsystem");
    let window = video_subsystem.window("Chip-8", WIDTH, HEIGHT)
        .position_centered().build().expect("Could not initialize window");
    let mut canvas = window.into_canvas().build().expect("Could not create canvas");
    let mut event_pump = sdl.event_pump().expect("Could not initialize event handler");
let mut last_tick_time = SystemTime::now();
let mut speaker = Speaker::new(sdl);
    // Map the host keyboard to the CHIP-8 hex keypad:
    //   1 2 3 4        1 2 3 C
    //   Q W E R  --->  4 5 6 D
    //   A S D F        7 8 9 E
    //   Z X C V        A 0 B F
    fn keycode_to_chip8(key: Keycode) -> Option<usize> {
        match key {
            Keycode::Num1 => Some(0x1),
            Keycode::Num2 => Some(0x2),
            Keycode::Num3 => Some(0x3),
            Keycode::Num4 => Some(0xC),
            Keycode::Q => Some(0x4),
            Keycode::W => Some(0x5),
            Keycode::E => Some(0x6),
            Keycode::R => Some(0xD),
            Keycode::A => Some(0x7),
            Keycode::S => Some(0x8),
            Keycode::D => Some(0x9),
            Keycode::F => Some(0xE),
            Keycode::Z => Some(0xA),
            Keycode::X => Some(0x0),
            Keycode::C => Some(0xB),
            Keycode::V => Some(0xF),
            _ => None,
        }
    }
    //main loop
    'main: loop {
        //input
        for event in event_pump.poll_iter() {
            match event {
                // Closing the window or pressing Escape exits the emulator;
                // a plain `break` here would only leave the event loop.
                Event::Quit { .. }
                | Event::KeyDown { keycode: Some(Keycode::Escape), .. } => break 'main,
                Event::KeyDown { keycode: Some(key), .. } => {
                    if let Some(i) = keycode_to_chip8(key) {
                        cpu.keys_pressed[i] = true;
                    }
                }
                Event::KeyUp { keycode: Some(key), .. } => {
                    if let Some(i) = keycode_to_chip8(key) {
                        cpu.keys_pressed[i] = false;
                    }
                }
                _ => {}
            }
        }
//cpu
cpu.cycle(&mut gfx);
//render
if cpu.draw_flag{
gfx.render(&mut canvas);
cpu.draw_flag = false;
}
//audio
if cpu.sound_flag{
speaker.start();
} else {
speaker.stop();
}
//timer
if last_tick_time.elapsed().expect("Clock error") >= Duration::new(0, 1_000_000_000u32 / 60){
cpu.timer();
last_tick_time = SystemTime::now()
}
        // Throttle emulation speed to roughly 2500 CPU cycles per second.
        sleep(Duration::new(0, 1_000_000_000u32 / 2500))
}
}
| 50.130841 | 104 | 0.547539 |
d571d1728ce10de3e5e6194e9a4486da6648e9ac | 6,045 | use std::{
fs::{self},
os::unix::fs::PermissionsExt,
path::{Component::RootDir, Path, PathBuf},
time::Duration,
};
use anyhow::{Context, Result};
use nix::unistd::Pid;
#[cfg(feature = "cgroupsv2_devices")]
use super::devices::Devices;
use super::{
controller::Controller,
controller_type::{
ControllerType, PseudoControllerType, CONTROLLER_TYPES, PSEUDO_CONTROLLER_TYPES,
},
cpu::Cpu,
cpuset::CpuSet,
freezer::Freezer,
hugetlb::HugeTlb,
io::Io,
memory::Memory,
pids::Pids,
unified::Unified,
util::{self, CGROUP_SUBTREE_CONTROL},
};
use crate::{
common::{self, CgroupManager, ControllerOpt, FreezerState, PathBufExt, CGROUP_PROCS},
stats::{Stats, StatsProvider},
};
pub const CGROUP_KILL: &str = "cgroup.kill";
pub struct Manager {
root_path: PathBuf,
cgroup_path: PathBuf,
full_path: PathBuf,
}
impl Manager {
/// Constructs a new cgroup manager with root path being the mount point
/// of a cgroup v2 fs and cgroup path being a relative path from the root
pub fn new(root_path: PathBuf, cgroup_path: PathBuf) -> Result<Self> {
let full_path = root_path.join_safely(&cgroup_path)?;
Ok(Self {
root_path,
cgroup_path,
full_path,
})
}
fn create_unified_cgroup(&self, pid: Pid) -> Result<()> {
let controllers: Vec<String> = util::get_available_controllers(&self.root_path)?
.iter()
            .map(|c| format!("+{}", c))
.collect();
Self::write_controllers(&self.root_path, &controllers)?;
let mut current_path = self.root_path.clone();
let mut components = self
.cgroup_path
.components()
.filter(|c| c.ne(&RootDir))
.peekable();
while let Some(component) = components.next() {
current_path = current_path.join(component);
            if !current_path.exists() {
                fs::create_dir(&current_path)?;
                // `set_mode` only mutates the in-memory `Permissions` value;
                // it must be written back with `fs::set_permissions` to take effect.
                let mut permissions = fs::metadata(&current_path)?.permissions();
                permissions.set_mode(0o755);
                fs::set_permissions(&current_path, permissions)?;
            }
            // the last component cannot have subtree_control enabled due to the internal process constraint;
            // if it were set, writing to the cgroup.procs file would fail with errno 16 (device or resource busy)
if components.peek().is_some() {
                Self::write_controllers(&current_path, &controllers)?;
}
}
common::write_cgroup_file(&self.full_path.join(CGROUP_PROCS), pid)?;
Ok(())
}
fn write_controllers(path: &Path, controllers: &[String]) -> Result<()> {
for controller in controllers {
common::write_cgroup_file_str(path.join(CGROUP_SUBTREE_CONTROL), controller)?;
}
Ok(())
}
}
impl CgroupManager for Manager {
fn add_task(&self, pid: Pid) -> Result<()> {
self.create_unified_cgroup(pid)?;
Ok(())
}
fn apply(&self, controller_opt: &ControllerOpt) -> Result<()> {
for controller in CONTROLLER_TYPES {
match controller {
ControllerType::Cpu => Cpu::apply(controller_opt, &self.full_path)?,
ControllerType::CpuSet => CpuSet::apply(controller_opt, &self.full_path)?,
ControllerType::HugeTlb => HugeTlb::apply(controller_opt, &self.full_path)?,
ControllerType::Io => Io::apply(controller_opt, &self.full_path)?,
ControllerType::Memory => Memory::apply(controller_opt, &self.full_path)?,
ControllerType::Pids => Pids::apply(controller_opt, &self.full_path)?,
}
}
#[cfg(feature = "cgroupsv2_devices")]
        Devices::apply(controller_opt, &self.full_path)?;
for pseudoctlr in PSEUDO_CONTROLLER_TYPES {
if let PseudoControllerType::Unified = pseudoctlr {
Unified::apply(
controller_opt,
&self.full_path,
util::get_available_controllers(&self.root_path)?,
)?;
}
}
Ok(())
}
fn remove(&self) -> Result<()> {
if self.full_path.exists() {
log::debug!("remove cgroup {:?}", self.full_path);
let kill_file = self.full_path.join(CGROUP_KILL);
if kill_file.exists() {
fs::write(kill_file, "1").context("failed to kill cgroup")?;
} else {
let procs_path = self.full_path.join(CGROUP_PROCS);
let procs = fs::read_to_string(&procs_path)?;
for line in procs.lines() {
let pid: i32 = line.parse()?;
let _ = nix::sys::signal::kill(Pid::from_raw(pid), nix::sys::signal::SIGKILL);
}
}
common::delete_with_retry(&self.full_path, 4, Duration::from_millis(100))?;
}
Ok(())
}
fn freeze(&self, state: FreezerState) -> Result<()> {
let controller_opt = ControllerOpt {
resources: &Default::default(),
freezer_state: Some(state),
oom_score_adj: None,
disable_oom_killer: false,
};
Freezer::apply(&controller_opt, &self.full_path)
}
fn stats(&self) -> Result<Stats> {
let mut stats = Stats::default();
for subsystem in CONTROLLER_TYPES {
match subsystem {
ControllerType::Cpu => stats.cpu.usage = Cpu::stats(&self.full_path)?,
ControllerType::HugeTlb => stats.hugetlb = HugeTlb::stats(&self.full_path)?,
ControllerType::Pids => stats.pids = Pids::stats(&self.full_path)?,
ControllerType::Memory => stats.memory = Memory::stats(&self.full_path)?,
ControllerType::Io => stats.blkio = Io::stats(&self.full_path)?,
_ => continue,
}
}
Ok(stats)
}
fn get_all_pids(&self) -> Result<Vec<Pid>> {
common::get_all_pids(&self.full_path)
}
}
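// Usage sketch (not part of the original file; requires root and a cgroup v2
// mount at the conventional path shown below — both are assumptions here):
//
//     let manager = Manager::new(
//         PathBuf::from("/sys/fs/cgroup"),    // v2 unified mount point
//         PathBuf::from("my-container"),      // relative cgroup path
//     )?;
//     manager.add_task(Pid::from_raw(1234))?; // creates the cgroup chain
//     let stats = manager.stats()?;           // per-controller snapshot
//     manager.remove()?;                      // kill members, delete dir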
| 33.032787 | 115 | 0.571547 |
9b87a150bb03496770470b5637b90b41d5e9e3e8 | 378 | use nalgebra::Vector3;
use crate::feat::{descriptors::Descriptor, Distance};
pub struct MapPoint<T>
where
T: Distance + Clone,
{
    pt: Vector3<f32>,    // position
    n: Vector3<f32>,     // viewing direction
    desc: Descriptor<T>, // feature descriptor
    dmax: f32,           // maximum distance at which the point can be observed
    dmin: f32,           // minimum distance at which the point can be observed
}
| 25.2 | 68 | 0.68254 |
e81f18faab9e5868cfb52959b4877155636fb0c3 | 3,805 | use bifrost_plugins::hash_ident;
pub mod client;
pub mod server;
// (raft_sid, sm_id, fn_id, pattern_id)
pub type SubKey = (u64, u64, u64, u64);
pub static DEFAULT_SERVICE_ID: u64 = hash_ident!(BIFROST_RAFT_SM_CALLBACK_DEFAULT_SERVICE) as u64;
service! {
rpc notify(key: SubKey, data: Vec<u8>);
}
#[cfg(test)]
mod test {
use crate::raft::client::RaftClient;
use crate::raft::state_machine::callback::server::SMCallback;
use crate::raft::state_machine::StateMachineCtl;
use crate::raft::{Options, RaftService, Storage, DEFAULT_SERVICE_ID};
use crate::rpc::Server;
use crate::utils::time::async_wait_secs;
use future::FutureExt;
use std::sync::atomic::*;
use std::sync::Arc;
pub struct Trigger {
count: u64,
callback: SMCallback,
}
raft_state_machine! {
def cmd trigger();
def sub on_trigged() -> u64;
}
impl StateMachineCmds for Trigger {
fn trigger(&mut self) -> BoxFuture<()> {
self.count += 1;
async move {
self.callback
.notify(commands::on_trigged::new(), self.count)
.await
.unwrap();
}
.boxed()
}
}
impl StateMachineCtl for Trigger {
raft_sm_complete!();
fn id(&self) -> u64 {
10
}
fn snapshot(&self) -> Option<Vec<u8>> {
None
}
fn recover(&mut self, _: Vec<u8>) -> BoxFuture<()> {
future::ready(()).boxed()
}
}
#[tokio::test(flavor = "multi_thread")]
async fn dummy() {
let _ = env_logger::try_init();
info!("TESTING CALLBACK");
let addr = String::from("127.0.0.1:2110");
let raft_service = RaftService::new(Options {
storage: Storage::default(),
address: addr.clone(),
service_id: DEFAULT_SERVICE_ID,
});
let server = Server::new(&addr);
let dummy_sm = Trigger {
count: 0,
callback: SMCallback::new(10, raft_service.clone()).await,
};
let sm_id = dummy_sm.id();
server
.register_service(DEFAULT_SERVICE_ID, &raft_service)
.await;
Server::listen_and_resume(&server).await;
RaftService::start(&raft_service).await;
raft_service
.register_state_machine(Box::new(dummy_sm))
.await;
raft_service.bootstrap().await;
async_wait_secs().await;
let raft_client = RaftClient::new(&vec![addr], DEFAULT_SERVICE_ID)
.await
.unwrap();
let sm_client = Arc::new(client::SMClient::new(sm_id, &raft_client));
let loops = 10;
let counter = Arc::new(AtomicUsize::new(0));
let counter_clone = counter.clone();
let sumer = Arc::new(AtomicUsize::new(0));
let sumer_clone = sumer.clone();
let mut expected_sum = 0;
RaftClient::prepare_subscription(&server).await;
sm_client
.on_trigged(move |res: u64| {
counter_clone.fetch_add(1, Ordering::Relaxed);
sumer_clone.fetch_add(res as usize, Ordering::Relaxed);
info!("CALLBACK TRIGGERED {}", res);
future::ready(()).boxed()
})
.await
.unwrap()
.unwrap();
for i in 0..loops {
let sm_client = sm_client.clone();
expected_sum += i + 1;
tokio::spawn(async move {
sm_client.trigger().await.unwrap();
});
}
async_wait_secs().await;
assert_eq!(counter.load(Ordering::Relaxed), loops);
assert_eq!(sumer.load(Ordering::Relaxed), expected_sum);
}
}
| 30.198413 | 98 | 0.546124 |
03567cab4a6ef07c54ca4fc2ab6cb94cbfdcdef3 | 850 | // Copyright (c) The Starcoin Core Contributors
// SPDX-License-Identifier: Apache-2.0
pub mod account_resource_ext;
pub mod account_state;
pub mod channel;
pub mod channel_transaction;
pub mod channel_transaction_info;
pub mod channel_transaction_sigs;
#[cfg(test)]
mod channel_transaction_test;
pub mod channel_transaction_to_commit;
#[macro_use]
pub mod hash;
pub mod applied_channel_txn;
pub mod htlc;
pub mod ledger_info;
pub mod message;
pub mod pending_txn;
pub mod proof;
pub mod proto;
pub mod resource;
#[cfg(test)]
mod resource_test;
pub mod s_value;
pub mod script_package;
pub mod sg_error;
pub mod signed_channel_transaction;
pub mod signed_channel_transaction_with_proof;
pub mod startup_info;
pub mod system_event;
pub mod write_set_item;
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
}
| 21.794872 | 47 | 0.772941 |
bfc13136f1ff23e8538754e40f6cc9d1f4e63749 | 3,252 | #[doc = "Register `se_trng_0_dout_5` reader"]
pub struct R(crate::R<SE_TRNG_0_DOUT_5_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<SE_TRNG_0_DOUT_5_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<SE_TRNG_0_DOUT_5_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<SE_TRNG_0_DOUT_5_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `se_trng_0_dout_5` writer"]
pub struct W(crate::W<SE_TRNG_0_DOUT_5_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<SE_TRNG_0_DOUT_5_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<SE_TRNG_0_DOUT_5_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<SE_TRNG_0_DOUT_5_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `se_trng_0_dout_5` reader - "]
pub struct SE_TRNG_0_DOUT_5_R(crate::FieldReader<u32, u32>);
impl SE_TRNG_0_DOUT_5_R {
#[inline(always)]
pub(crate) fn new(bits: u32) -> Self {
SE_TRNG_0_DOUT_5_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for SE_TRNG_0_DOUT_5_R {
type Target = crate::FieldReader<u32, u32>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `se_trng_0_dout_5` writer - "]
pub struct SE_TRNG_0_DOUT_5_W<'a> {
w: &'a mut W,
}
impl<'a> SE_TRNG_0_DOUT_5_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u32) -> &'a mut W {
self.w.bits = value;
self.w
}
}
impl R {
#[doc = "Bits 0:31"]
#[inline(always)]
pub fn se_trng_0_dout_5(&self) -> SE_TRNG_0_DOUT_5_R {
SE_TRNG_0_DOUT_5_R::new(self.bits)
}
}
impl W {
#[doc = "Bits 0:31"]
#[inline(always)]
pub fn se_trng_0_dout_5(&mut self) -> SE_TRNG_0_DOUT_5_W {
SE_TRNG_0_DOUT_5_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "se_trng_0_dout_5.\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [se_trng_0_dout_5](index.html) module"]
pub struct SE_TRNG_0_DOUT_5_SPEC;
impl crate::RegisterSpec for SE_TRNG_0_DOUT_5_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [se_trng_0_dout_5::R](R) reader structure"]
impl crate::Readable for SE_TRNG_0_DOUT_5_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [se_trng_0_dout_5::W](W) writer structure"]
impl crate::Writable for SE_TRNG_0_DOUT_5_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets se_trng_0_dout_5 to value 0"]
impl crate::Resettable for SE_TRNG_0_DOUT_5_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
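// Usage sketch (not part of the generated file): given a peripheral handle
// `sec` exposing this register — the handle name is illustrative — the
// svd2rust read API documented above yields the 32-bit TRNG output word:
//
//     let word: u32 = sec.se_trng_0_dout_5.read().se_trng_0_dout_5().bits();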
| 31.269231 | 414 | 0.635301 |
62198e21ef7ce7ca4fb5449cd108472aef35b948 | 1,600 | /// Calls which should trigger the `UNNECESSARY_FOLD` lint
fn unnecessary_fold() {
// Can be replaced by .any
let _ = (0..3).fold(false, |acc, x| acc || x > 2);
// Can be replaced by .all
let _ = (0..3).fold(true, |acc, x| acc && x > 2);
// Can be replaced by .sum
let _ = (0..3).fold(0, |acc, x| acc + x);
// Can be replaced by .product
let _ = (0..3).fold(1, |acc, x| acc * x);
}
/// Should trigger the `UNNECESSARY_FOLD` lint, with an error span including exactly `.fold(...)`
fn unnecessary_fold_span_for_multi_element_chain() {
let _ = (0..3).map(|x| 2 * x).fold(false, |acc, x| acc || x > 2);
}
/// Calls which should not trigger the `UNNECESSARY_FOLD` lint
fn unnecessary_fold_should_ignore() {
let _ = (0..3).fold(true, |acc, x| acc || x > 2);
let _ = (0..3).fold(false, |acc, x| acc && x > 2);
let _ = (0..3).fold(1, |acc, x| acc + x);
let _ = (0..3).fold(0, |acc, x| acc * x);
let _ = (0..3).fold(0, |acc, x| 1 + acc + x);
// We only match against an accumulator on the left
// hand side. We could lint for .sum and .product when
// it's on the right, but don't for now (and this wouldn't
// be valid if we extended the lint to cover arbitrary numeric
// types).
let _ = (0..3).fold(false, |acc, x| x > 2 || acc);
let _ = (0..3).fold(true, |acc, x| x > 2 && acc);
let _ = (0..3).fold(0, |acc, x| x + acc);
let _ = (0..3).fold(1, |acc, x| x * acc);
let _ = [(0..2), (0..3)].iter().fold(0, |a, b| a + b.len());
let _ = [(0..2), (0..3)].iter().fold(1, |a, b| a * b.len());
}
fn main() {}
| 39.02439 | 97 | 0.544375 |
f790e3236ec951c596eaa4238df4bb7c59d66084 | 18,268 | extern crate serde;
extern crate rltk;
use rltk::{Console, GameState, Rltk, Point};
extern crate specs;
use specs::prelude::*;
use specs::saveload::{SimpleMarker, SimpleMarkerAllocator};
#[macro_use]
extern crate specs_derive;
mod components;
pub use components::*;
mod map;
pub use map::*;
mod player;
use player::*;
mod rect;
pub use rect::Rect;
mod visibility_system;
use visibility_system::VisibilitySystem;
mod monster_ai_system;
use monster_ai_system::MonsterAI;
mod map_indexing_system;
use map_indexing_system::MapIndexingSystem;
mod melee_combat_system;
use melee_combat_system::MeleeCombatSystem;
mod damage_system;
use damage_system::DamageSystem;
mod gui;
mod gamelog;
mod spawner;
mod inventory_system;
use inventory_system::{ ItemCollectionSystem, ItemUseSystem, ItemDropSystem, ItemRemoveSystem };
pub mod saveload_system;
pub mod random_table;
pub mod particle_system;
pub mod hunger_system;
pub mod rex_assets;
pub mod trigger_system;
pub mod map_builders;
const SHOW_MAPGEN_VISUALIZER : bool = true;
#[derive(PartialEq, Copy, Clone)]
pub enum RunState { AwaitingInput,
PreRun,
PlayerTurn,
MonsterTurn,
ShowInventory,
ShowDropItem,
ShowTargeting { range : i32, item : Entity},
MainMenu { menu_selection : gui::MainMenuSelection },
SaveGame,
NextLevel,
ShowRemoveItem,
GameOver,
MagicMapReveal { row : i32 },
MapGeneration
}
pub struct State {
pub ecs: World,
mapgen_next_state : Option<RunState>,
mapgen_history : Vec<Map>,
mapgen_index : usize,
mapgen_timer : f32
}
impl State {
fn run_systems(&mut self) {
let mut vis = VisibilitySystem{};
vis.run_now(&self.ecs);
let mut mob = MonsterAI{};
mob.run_now(&self.ecs);
let mut mapindex = MapIndexingSystem{};
mapindex.run_now(&self.ecs);
let mut triggers = trigger_system::TriggerSystem{};
triggers.run_now(&self.ecs);
let mut melee = MeleeCombatSystem{};
melee.run_now(&self.ecs);
let mut damage = DamageSystem{};
damage.run_now(&self.ecs);
let mut pickup = ItemCollectionSystem{};
pickup.run_now(&self.ecs);
let mut itemuse = ItemUseSystem{};
itemuse.run_now(&self.ecs);
let mut drop_items = ItemDropSystem{};
drop_items.run_now(&self.ecs);
let mut item_remove = ItemRemoveSystem{};
item_remove.run_now(&self.ecs);
let mut hunger = hunger_system::HungerSystem{};
hunger.run_now(&self.ecs);
let mut particles = particle_system::ParticleSpawnSystem{};
particles.run_now(&self.ecs);
self.ecs.maintain();
}
}
impl GameState for State {
fn tick(&mut self, ctx : &mut Rltk) {
let mut newrunstate;
{
let runstate = self.ecs.fetch::<RunState>();
newrunstate = *runstate;
}
ctx.cls();
particle_system::cull_dead_particles(&mut self.ecs, ctx);
match newrunstate {
RunState::MainMenu{..} => {}
RunState::GameOver{..} => {}
_ => {
draw_map(&self.ecs.fetch::<Map>(), ctx);
let positions = self.ecs.read_storage::<Position>();
let renderables = self.ecs.read_storage::<Renderable>();
let hidden = self.ecs.read_storage::<Hidden>();
let map = self.ecs.fetch::<Map>();
let mut data = (&positions, &renderables, !&hidden).join().collect::<Vec<_>>();
data.sort_by(|&a, &b| b.1.render_order.cmp(&a.1.render_order) );
for (pos, render, _hidden) in data.iter() {
let idx = map.xy_idx(pos.x, pos.y);
if map.visible_tiles[idx] { ctx.set(pos.x, pos.y, render.fg, render.bg, render.glyph) }
}
gui::draw_ui(&self.ecs, ctx);
}
}
match newrunstate {
            RunState::MapGeneration => {
                if !SHOW_MAPGEN_VISUALIZER {
                    // No snapshots are recorded when the visualizer is off, so
                    // skip straight to the next state instead of indexing into
                    // an empty history (which would panic).
                    newrunstate = self.mapgen_next_state.unwrap();
                } else {
                    ctx.cls();
                    if self.mapgen_index < self.mapgen_history.len() {
                        draw_map(&self.mapgen_history[self.mapgen_index], ctx);
                    }
                    self.mapgen_timer += ctx.frame_time_ms;
                    if self.mapgen_timer > 300.0 {
                        self.mapgen_timer = 0.0;
                        self.mapgen_index += 1;
                        if self.mapgen_index >= self.mapgen_history.len() {
                            newrunstate = self.mapgen_next_state.unwrap();
                        }
                    }
                }
            }
RunState::PreRun => {
self.run_systems();
self.ecs.maintain();
newrunstate = RunState::AwaitingInput;
}
RunState::AwaitingInput => {
newrunstate = player_input(self, ctx);
}
RunState::PlayerTurn => {
self.run_systems();
self.ecs.maintain();
match *self.ecs.fetch::<RunState>() {
RunState::MagicMapReveal{ .. } => newrunstate = RunState::MagicMapReveal{ row: 0 },
_ => newrunstate = RunState::MonsterTurn
}
}
RunState::MonsterTurn => {
self.run_systems();
self.ecs.maintain();
newrunstate = RunState::AwaitingInput;
}
RunState::ShowInventory => {
let result = gui::show_inventory(self, ctx);
match result.0 {
gui::ItemMenuResult::Cancel => newrunstate = RunState::AwaitingInput,
gui::ItemMenuResult::NoResponse => {}
gui::ItemMenuResult::Selected => {
let item_entity = result.1.unwrap();
let is_ranged = self.ecs.read_storage::<Ranged>();
let is_item_ranged = is_ranged.get(item_entity);
if let Some(is_item_ranged) = is_item_ranged {
newrunstate = RunState::ShowTargeting{ range: is_item_ranged.range, item: item_entity };
} else {
let mut intent = self.ecs.write_storage::<WantsToUseItem>();
intent.insert(*self.ecs.fetch::<Entity>(), WantsToUseItem{ item: item_entity, target: None }).expect("Unable to insert intent");
newrunstate = RunState::PlayerTurn;
}
}
}
}
RunState::ShowDropItem => {
let result = gui::drop_item_menu(self, ctx);
match result.0 {
gui::ItemMenuResult::Cancel => newrunstate = RunState::AwaitingInput,
gui::ItemMenuResult::NoResponse => {}
gui::ItemMenuResult::Selected => {
let item_entity = result.1.unwrap();
let mut intent = self.ecs.write_storage::<WantsToDropItem>();
intent.insert(*self.ecs.fetch::<Entity>(), WantsToDropItem{ item: item_entity }).expect("Unable to insert intent");
newrunstate = RunState::PlayerTurn;
}
}
}
RunState::ShowRemoveItem => {
let result = gui::remove_item_menu(self, ctx);
match result.0 {
gui::ItemMenuResult::Cancel => newrunstate = RunState::AwaitingInput,
gui::ItemMenuResult::NoResponse => {}
gui::ItemMenuResult::Selected => {
let item_entity = result.1.unwrap();
let mut intent = self.ecs.write_storage::<WantsToRemoveItem>();
intent.insert(*self.ecs.fetch::<Entity>(), WantsToRemoveItem{ item: item_entity }).expect("Unable to insert intent");
newrunstate = RunState::PlayerTurn;
}
}
}
RunState::ShowTargeting{range, item} => {
let result = gui::ranged_target(self, ctx, range);
match result.0 {
gui::ItemMenuResult::Cancel => newrunstate = RunState::AwaitingInput,
gui::ItemMenuResult::NoResponse => {}
gui::ItemMenuResult::Selected => {
let mut intent = self.ecs.write_storage::<WantsToUseItem>();
intent.insert(*self.ecs.fetch::<Entity>(), WantsToUseItem{ item, target: result.1 }).expect("Unable to insert intent");
newrunstate = RunState::PlayerTurn;
}
}
}
RunState::MainMenu{ .. } => {
let result = gui::main_menu(self, ctx);
match result {
gui::MainMenuResult::NoSelection{ selected } => newrunstate = RunState::MainMenu{ menu_selection: selected },
gui::MainMenuResult::Selected{ selected } => {
match selected {
gui::MainMenuSelection::NewGame => newrunstate = RunState::PreRun,
gui::MainMenuSelection::LoadGame => {
saveload_system::load_game(&mut self.ecs);
newrunstate = RunState::AwaitingInput;
saveload_system::delete_save();
}
gui::MainMenuSelection::Quit => { ::std::process::exit(0); }
}
}
}
}
RunState::GameOver => {
let result = gui::game_over(ctx);
match result {
gui::GameOverResult::NoSelection => {}
gui::GameOverResult::QuitToMenu => {
self.game_over_cleanup();
newrunstate = RunState::MapGeneration;
self.mapgen_next_state = Some(RunState::MainMenu{ menu_selection: gui::MainMenuSelection::NewGame });
}
}
}
RunState::SaveGame => {
saveload_system::save_game(&mut self.ecs);
newrunstate = RunState::MainMenu{ menu_selection : gui::MainMenuSelection::LoadGame };
}
RunState::NextLevel => {
self.goto_next_level();
self.mapgen_next_state = Some(RunState::PreRun);
newrunstate = RunState::MapGeneration;
}
RunState::MagicMapReveal{row} => {
let mut map = self.ecs.fetch_mut::<Map>();
for x in 0..MAPWIDTH {
let idx = map.xy_idx(x as i32,row);
map.revealed_tiles[idx] = true;
}
if row as usize == MAPHEIGHT-1 {
newrunstate = RunState::MonsterTurn;
} else {
newrunstate = RunState::MagicMapReveal{ row: row+1 };
}
}
}
{
let mut runwriter = self.ecs.write_resource::<RunState>();
*runwriter = newrunstate;
}
damage_system::delete_the_dead(&mut self.ecs);
}
}
impl State {
fn entities_to_remove_on_level_change(&mut self) -> Vec<Entity> {
let entities = self.ecs.entities();
let player = self.ecs.read_storage::<Player>();
let backpack = self.ecs.read_storage::<InBackpack>();
let player_entity = self.ecs.fetch::<Entity>();
let equipped = self.ecs.read_storage::<Equipped>();
let mut to_delete : Vec<Entity> = Vec::new();
for entity in entities.join() {
let mut should_delete = true;
// Don't delete the player
let p = player.get(entity);
if let Some(_p) = p {
should_delete = false;
}
// Don't delete the player's equipment
let bp = backpack.get(entity);
if let Some(bp) = bp {
if bp.owner == *player_entity {
should_delete = false;
}
}
let eq = equipped.get(entity);
if let Some(eq) = eq {
if eq.owner == *player_entity {
should_delete = false;
}
}
if should_delete {
to_delete.push(entity);
}
}
to_delete
}
fn goto_next_level(&mut self) {
// Delete entities that aren't the player or his/her equipment
let to_delete = self.entities_to_remove_on_level_change();
for target in to_delete {
self.ecs.delete_entity(target).expect("Unable to delete entity");
}
// Build a new map and place the player
let current_depth;
{
let worldmap_resource = self.ecs.fetch::<Map>();
current_depth = worldmap_resource.depth;
}
self.generate_world_map(current_depth + 1);
// Notify the player and give them some health
let player_entity = self.ecs.fetch::<Entity>();
let mut gamelog = self.ecs.fetch_mut::<gamelog::GameLog>();
gamelog.entries.push("You descend to the next level, and take a moment to heal.".to_string());
let mut player_health_store = self.ecs.write_storage::<CombatStats>();
let player_health = player_health_store.get_mut(*player_entity);
if let Some(player_health) = player_health {
player_health.hp = i32::max(player_health.hp, player_health.max_hp / 2);
}
}
fn game_over_cleanup(&mut self) {
// Delete everything
        let to_delete: Vec<Entity> = self.ecs.entities().join().collect();
for del in to_delete.iter() {
self.ecs.delete_entity(*del).expect("Deletion failed");
}
// Spawn a new player
{
let player_entity = spawner::player(&mut self.ecs, 0, 0);
let mut player_entity_writer = self.ecs.write_resource::<Entity>();
*player_entity_writer = player_entity;
}
// Build a new map and place the player
self.generate_world_map(1);
}
fn generate_world_map(&mut self, new_depth : i32) {
self.mapgen_index = 0;
self.mapgen_timer = 0.0;
self.mapgen_history.clear();
let mut builder = map_builders::random_builder(new_depth);
builder.build_map();
self.mapgen_history = builder.get_snapshot_history();
let player_start;
{
let mut worldmap_resource = self.ecs.write_resource::<Map>();
*worldmap_resource = builder.get_map();
player_start = builder.get_starting_position();
}
// Spawn bad guys
builder.spawn_entities(&mut self.ecs);
// Place the player and update resources
let (player_x, player_y) = (player_start.x, player_start.y);
let mut player_position = self.ecs.write_resource::<Point>();
*player_position = Point::new(player_x, player_y);
let mut position_components = self.ecs.write_storage::<Position>();
let player_entity = self.ecs.fetch::<Entity>();
let player_pos_comp = position_components.get_mut(*player_entity);
if let Some(player_pos_comp) = player_pos_comp {
player_pos_comp.x = player_x;
player_pos_comp.y = player_y;
}
// Mark the player's visibility as dirty
let mut viewshed_components = self.ecs.write_storage::<Viewshed>();
let vs = viewshed_components.get_mut(*player_entity);
if let Some(vs) = vs {
vs.dirty = true;
}
}
}
fn main() {
let mut context = Rltk::init_simple8x8(80, 50, "Hello Rust World", "resources");
context.with_post_scanlines(true);
let mut gs = State {
ecs: World::new(),
mapgen_next_state : Some(RunState::MainMenu{ menu_selection: gui::MainMenuSelection::NewGame }),
mapgen_index : 0,
mapgen_history: Vec::new(),
mapgen_timer: 0.0
};
gs.ecs.register::<Position>();
gs.ecs.register::<Renderable>();
gs.ecs.register::<Player>();
gs.ecs.register::<Viewshed>();
gs.ecs.register::<Monster>();
gs.ecs.register::<Name>();
gs.ecs.register::<BlocksTile>();
gs.ecs.register::<CombatStats>();
gs.ecs.register::<WantsToMelee>();
gs.ecs.register::<SufferDamage>();
gs.ecs.register::<Item>();
gs.ecs.register::<ProvidesHealing>();
gs.ecs.register::<InflictsDamage>();
gs.ecs.register::<AreaOfEffect>();
gs.ecs.register::<Consumable>();
gs.ecs.register::<Ranged>();
gs.ecs.register::<InBackpack>();
gs.ecs.register::<WantsToPickupItem>();
gs.ecs.register::<WantsToUseItem>();
gs.ecs.register::<WantsToDropItem>();
gs.ecs.register::<Confusion>();
gs.ecs.register::<SimpleMarker<SerializeMe>>();
gs.ecs.register::<SerializationHelper>();
gs.ecs.register::<Equippable>();
gs.ecs.register::<Equipped>();
gs.ecs.register::<MeleePowerBonus>();
gs.ecs.register::<DefenseBonus>();
gs.ecs.register::<WantsToRemoveItem>();
gs.ecs.register::<ParticleLifetime>();
gs.ecs.register::<HungerClock>();
gs.ecs.register::<ProvidesFood>();
gs.ecs.register::<MagicMapper>();
gs.ecs.register::<Hidden>();
gs.ecs.register::<EntryTrigger>();
gs.ecs.register::<EntityMoved>();
gs.ecs.register::<SingleActivation>();
gs.ecs.insert(SimpleMarkerAllocator::<SerializeMe>::new());
gs.ecs.insert(Map::new(1));
gs.ecs.insert(Point::new(0, 0));
gs.ecs.insert(rltk::RandomNumberGenerator::new());
let player_entity = spawner::player(&mut gs.ecs, 0, 0);
gs.ecs.insert(player_entity);
gs.ecs.insert(RunState::MapGeneration{} );
gs.ecs.insert(gamelog::GameLog{ entries : vec!["Welcome to Rusty Roguelike".to_string()] });
gs.ecs.insert(particle_system::ParticleBuilder::new());
gs.ecs.insert(rex_assets::RexAssets::new());
gs.generate_world_map(1);
rltk::main_loop(context, gs);
}
| 38.540084 | 156 | 0.553153 |
ef9f2f06dede91a1a9f53e80dc4c6b6b5ae4ffee | 42,903 | use std::convert::TryFrom;
use std::{
io::{Error as IoError, Write},
net::{SocketAddr, UdpSocket},
path::PathBuf,
};
use chrono::Utc;
use modality_probe_collector_common::{self as common, json, Report, ReportLogEntry, SessionId};
mod opts;
pub use opts::*;
#[derive(Debug, PartialEq)]
pub struct Config {
pub addr: SocketAddr,
pub session_id: SessionId,
pub output_file: PathBuf,
}
pub struct ShutdownSignalSender {
pub sender: std::sync::mpsc::Sender<()>,
pub server_addr: SocketAddr,
}
const OS_PICK_ADDR_HINT: &str = "0.0.0.0:0";
pub type ShutdownSignalReceiver = std::sync::mpsc::Receiver<()>;
impl ShutdownSignalSender {
pub fn new(server_addr: SocketAddr) -> (ShutdownSignalSender, ShutdownSignalReceiver) {
let (sender, receiver) = std::sync::mpsc::channel();
(
ShutdownSignalSender {
sender,
server_addr,
},
receiver,
)
}
pub fn shutdown(&self) {
if self.sender.send(()).is_err() {
// The server side receiving the message is already gone
return;
}
if let Ok(socket) = UdpSocket::bind(OS_PICK_ADDR_HINT) {
// Try to send a dummy byte to kick the server's silly synchronous
// receive loop
let _ = socket.send_to(&[0], self.server_addr);
}
}
}
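// Pairing sketch (not part of the original file; the address and session id
// are illustrative): hand the receiver to the blocking server loop on one
// thread and trigger shutdown from another.
//
//     let addr: SocketAddr = "127.0.0.1:9999".parse().unwrap();
//     let (shutdown, shutdown_rx) = ShutdownSignalSender::new(addr);
//     let handle = std::thread::spawn(move || {
//         start_receiving_at_addr(addr, 1.into(), &mut std::io::sink(), shutdown_rx)
//     });
//     shutdown.shutdown(); // the dummy byte unblocks the recv_from loop
//     handle.join().unwrap().unwrap();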
pub fn start_receiving(
config: Config,
shutdown_signal_receiver: ShutdownSignalReceiver,
) -> Result<(), IoError> {
let mut file = std::fs::OpenOptions::new()
.append(true)
.create(true)
.open(config.output_file)?;
start_receiving_at_addr(
config.addr,
config.session_id,
&mut file,
shutdown_signal_receiver,
)
}
pub fn start_receiving_at_addr<W: Write>(
addr: SocketAddr,
session_id: SessionId,
log_output_writer: &mut W,
shutdown_signal_receiver: ShutdownSignalReceiver,
) -> Result<(), IoError> {
start_receiving_from_socket(
UdpSocket::bind(addr)?,
session_id,
log_output_writer,
shutdown_signal_receiver,
);
Ok(())
}
pub fn start_receiving_from_socket<W: Write>(
socket: UdpSocket,
session_id: SessionId,
log_output_writer: &mut W,
shutdown_signal_receiver: ShutdownSignalReceiver,
) {
let addr = socket.local_addr().map(|a| a.to_string());
let mut buf = vec![0u8; 1024 * 1024];
let mut log_entries_buffer: Vec<ReportLogEntry> = Vec::with_capacity(4096);
loop {
if shutdown_signal_receiver.try_recv().is_ok() {
return;
}
// Be sure to zero out the first few bytes to ensure that the
// magic fingerprint words are not stale.
for b in buf[..8].iter_mut() {
*b = 0;
}
let (bytes_read, _src) = match socket.recv_from(&mut buf) {
Ok(r) => r,
Err(e) => {
match addr.as_ref() {
Ok(a) => eprintln!("Error during recv_from on {} : {}", a, e),
Err(_) => eprintln!("Error during recv_from : {}", e),
}
continue;
}
};
if bytes_read == 1 && buf[0] == 0 {
// Dummy byte received solely for the purpose of kicking the server's recv loop
// during a shutdown
continue;
}
let receive_time = Utc::now();
// N.B. If we were feeling bottlenecked, hand off the read bytes to another thread
// N.B. If we were feeling fancy, do said handoff by reading directly into a rotating preallocated
// slot in a concurrent queue, ala LMAX Disruptor
// N.B. To avoid copies and allocation, skip materializing a log report
// and instead directly create log entries. Probably wise to wait until the
// log format settles down some before doing this.
log_entries_buffer.clear();
match Report::try_from(&buf[..bytes_read]) {
Ok(log_report) => {
if let Err(e) = common::add_log_report_to_entries(
&log_report,
session_id,
receive_time,
&mut log_entries_buffer,
) {
eprintln!(
"Encountered a malformed report, not adding it to the trace: {}",
e
)
}
}
Err(_) => {
eprintln!(
"Error parsing a message as a report, throwing away {} bytes",
bytes_read
);
continue;
}
}
if let Err(e) = json::write_log_entries(log_output_writer, &log_entries_buffer) {
eprintln!("Error writing log entries: {}", e);
}
let _ = log_output_writer.flush();
}
}
#[cfg(test)]
mod tests {
use std::{
collections::{HashMap, HashSet},
convert::TryInto,
net::{Ipv4Addr, SocketAddrV4, TcpListener},
sync::{
atomic::{AtomicU16, AtomicU32, Ordering},
Mutex,
},
thread,
};
use chrono::DateTime;
use lazy_static::*;
use pretty_assertions::assert_eq;
use modality_probe::time::{NanosecondResolution, WallClockId};
use modality_probe::*;
use modality_probe_collector_common::*;
use super::*;
use std::mem::MaybeUninit;
fn dummy_report(raw_main_probe_id: u32) -> Report {
Report {
probe_id: ProbeId::new(raw_main_probe_id).unwrap(),
probe_clock: LogicalClock {
id: ProbeId::new(2).unwrap(),
epoch: ProbeEpoch(1),
ticks: ProbeTicks(1),
},
seq_num: 1.into(),
persistent_epoch_counting: false,
time_resolution: NanosecondResolution::UNSPECIFIED,
wall_clock_id: WallClockId::default(),
frontier_clocks: vec![LogicalClock {
id: ProbeId::new(raw_main_probe_id).unwrap(),
epoch: ProbeEpoch(0),
ticks: ProbeTicks(0),
}],
event_log: vec![
EventLogEntry::Event(EventId::new(2).unwrap()),
EventLogEntry::TraceClock(LogicalClock {
id: ProbeId::new(2).unwrap(),
epoch: ProbeEpoch(1),
ticks: ProbeTicks(1),
}),
EventLogEntry::TraceClock(LogicalClock {
id: ProbeId::new(1).unwrap(),
epoch: ProbeEpoch(0),
ticks: ProbeTicks(0),
}),
],
}
}
fn report_and_matching_entries(
raw_main_probe_id: u32,
session_id: SessionId,
receive_time: DateTime<Utc>,
) -> (Report, Vec<ReportLogEntry>) {
let main_probe_id = raw_main_probe_id.try_into().unwrap();
let rep = dummy_report(raw_main_probe_id);
let entries = vec![
ReportLogEntry {
session_id,
sequence_number: 1.into(),
sequence_index: 0,
probe_id: main_probe_id,
persistent_epoch_counting: rep.persistent_epoch_counting,
time_resolution: rep.time_resolution,
wall_clock_id: rep.wall_clock_id,
data: LogEntryData::FrontierClock(LogicalClock {
id: main_probe_id,
epoch: ProbeEpoch(0),
ticks: ProbeTicks(0),
}),
clock: LogicalClock {
id: main_probe_id,
epoch: ProbeEpoch(0),
ticks: ProbeTicks(0),
},
receive_time,
},
ReportLogEntry {
session_id,
sequence_number: 1.into(),
sequence_index: 1,
probe_id: main_probe_id,
persistent_epoch_counting: rep.persistent_epoch_counting,
time_resolution: rep.time_resolution,
wall_clock_id: rep.wall_clock_id,
data: LogEntryData::Event(EventId::new(2).unwrap()),
clock: LogicalClock {
id: main_probe_id,
epoch: ProbeEpoch(0),
ticks: ProbeTicks(0),
},
receive_time,
},
ReportLogEntry {
session_id,
sequence_number: 1.into(),
sequence_index: 2,
probe_id: main_probe_id,
persistent_epoch_counting: rep.persistent_epoch_counting,
time_resolution: rep.time_resolution,
wall_clock_id: rep.wall_clock_id,
data: LogEntryData::TraceClock(LogicalClock {
id: main_probe_id,
epoch: ProbeEpoch(1),
ticks: ProbeTicks(1),
}),
clock: LogicalClock {
id: main_probe_id,
epoch: ProbeEpoch(1),
ticks: ProbeTicks(1),
},
receive_time,
},
ReportLogEntry {
session_id,
sequence_number: 1.into(),
sequence_index: 3,
probe_id: main_probe_id,
persistent_epoch_counting: rep.persistent_epoch_counting,
time_resolution: rep.time_resolution,
wall_clock_id: rep.wall_clock_id,
data: LogEntryData::TraceClock(LogicalClock {
id: ProbeId::new(1).unwrap(),
epoch: ProbeEpoch(0),
ticks: ProbeTicks(0),
}),
clock: LogicalClock {
id: main_probe_id,
epoch: ProbeEpoch(1),
ticks: ProbeTicks(1),
},
receive_time,
},
];
(rep, entries)
}
#[test]
fn log_report_to_entries() {
let raw_main_probe_id = 2;
let session_id = 81.into();
let receive_time = Utc::now();
let (report, expected_entries) =
report_and_matching_entries(raw_main_probe_id, session_id, receive_time);
let mut entries = Vec::new();
add_log_report_to_entries(&report, session_id, receive_time, &mut entries).unwrap();
assert_eq!(4, entries.len());
for (idx, e) in entries.iter().enumerate() {
assert_eq!(idx, e.sequence_index as usize);
}
assert_eq!(expected_entries, entries);
}
lazy_static! {
static ref ACTIVE_TEST_PORTS: Mutex<HashSet<u16>> = Mutex::new(Default::default());
}
static STARTING_PORT: AtomicU16 = AtomicU16::new(8000);
fn find_usable_addrs(limit: usize) -> Vec<SocketAddr> {
let start_at = STARTING_PORT.load(Ordering::SeqCst);
let mut ports = ACTIVE_TEST_PORTS.lock().unwrap();
(start_at..start_at + 1000)
.filter_map(|port| {
if ports.contains(&port) {
return None;
}
let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), port));
if let Ok(tcp_binding) = TcpListener::bind(addr) {
STARTING_PORT.store(port + 1, Ordering::SeqCst);
ports.insert(port);
std::mem::drop(tcp_binding);
if UdpSocket::bind(addr).is_ok() {
Some(addr)
} else {
None
}
} else {
None
}
})
.take(limit)
.collect()
}
#[derive(Copy, Clone, Debug, PartialEq)]
enum ServerState {
Started,
Shutdown,
}
static TICKING_SESSION_ID: AtomicU32 = AtomicU32::new(314);
fn gen_session_id() -> u32 {
TICKING_SESSION_ID.fetch_add(1, Ordering::SeqCst)
}
#[test]
fn minimal_round_trip() {
let addrs = find_usable_addrs(2);
let server_addr = *addrs.first().unwrap();
let (shutdown_sender, shutdown_receiver) = ShutdownSignalSender::new(server_addr);
let (server_state_sender, server_state_receiver) = crossbeam::unbounded();
let session_id = gen_session_id().into();
let f = tempfile::NamedTempFile::new().expect("Could not make temp file");
let output_file_path = PathBuf::from(f.path());
let config = Config {
addr: server_addr,
session_id,
output_file: output_file_path.clone(),
};
let h = std::thread::spawn(move || {
let mut file = std::fs::OpenOptions::new()
.append(true)
.create(true)
.open(config.output_file)
.expect("Could not open file for writing");
let socket = UdpSocket::bind(config.addr).expect("Could not bind to server socket");
server_state_sender
.send(ServerState::Started)
.expect("Could not send status update");
start_receiving_from_socket(socket, config.session_id, &mut file, shutdown_receiver);
let _ = server_state_sender.send(ServerState::Shutdown);
});
thread::yield_now();
let log_report = dummy_report(31);
if let ServerState::Started = server_state_receiver
.recv()
.expect("Could not get state update")
{
let mut lcm_log_report = [0u8; 1024];
let lcm_bytes = log_report
.write_into_le_bytes(&mut lcm_log_report)
.expect("Could not write log report as lcm");
let client_addr = addrs[1];
let socket =
UdpSocket::bind(client_addr).expect("Could not bind to socket for sending");
socket
.send_to(&lcm_log_report[..lcm_bytes], server_addr)
.expect("Could not send lcm bytes");
thread::sleep(std::time::Duration::from_millis(200));
shutdown_sender.shutdown();
} else {
panic!("Server did not start up");
}
let ss = server_state_receiver
.recv()
.expect("Could not get state update");
if ss != ServerState::Shutdown {
panic!("Expected the server to have shut down");
}
let mut file_reader =
std::fs::File::open(&output_file_path).expect("Could not open output file for reading");
let found_log_entries = json::read_log_entries(&mut file_reader)
.expect("Could not read output file as json log entries");
let expected_entries: usize = log_report.frontier_clocks.len() + log_report.event_log.len();
assert_eq!(expected_entries, found_log_entries.len());
let found_entry_ids: HashSet<_> = found_log_entries
.iter()
.map(|e| (e.session_id, e.sequence_number, e.sequence_index))
.collect();
assert_eq!(
expected_entries,
found_entry_ids.len(),
"All entries must have unique id tuples"
);
for e in found_log_entries.iter() {
assert_eq!(session_id, e.session_id);
assert_eq!(log_report.probe_id, e.probe_id);
}
h.join().expect("Couldn't join server handler thread");
}
const SNAPSHOT_BYTES_SIZE: usize = 12;
const PROBE_STORAGE_BYTES_SIZE: usize = 512;
const LOG_REPORT_BYTES_SIZE: usize = 512;
#[test]
fn linear_triple_inferred_unreporting_middleman_graph() {
let addrs = find_usable_addrs(1);
let server_addr = addrs[0];
let (shutdown_sender, shutdown_receiver) = ShutdownSignalSender::new(server_addr);
let (server_state_sender, server_state_receiver) = crossbeam::bounded(0);
let session_id = gen_session_id().into();
let f = tempfile::NamedTempFile::new().expect("Could not make temp file");
let output_file_path = PathBuf::from(f.path());
let config = Config {
addr: server_addr,
session_id,
output_file: output_file_path.clone(),
};
let h = thread::spawn(move || {
let mut file = std::fs::OpenOptions::new()
.append(true)
.create(true)
.open(config.output_file)
.expect("Could not open file for writing");
let socket = UdpSocket::bind(config.addr).expect("Could not bind to server socket");
server_state_sender
.send(ServerState::Started)
.expect("Could not send status update");
start_receiving_from_socket(socket, config.session_id, &mut file, shutdown_receiver);
let _ = server_state_sender.send(ServerState::Shutdown);
});
thread::yield_now();
assert_eq!(Ok(ServerState::Started), server_state_receiver.recv());
let mut net = proc_graph::Network::new();
let probe_a_id = modality_probe::ProbeId::new(131).unwrap();
let probe_b_id = modality_probe::ProbeId::new(141).unwrap();
let probe_c_id = modality_probe::ProbeId::new(159).unwrap();
let event_foo = EventLogEntry::Event(modality_probe::EventId::new(7).unwrap());
let event_bar = EventLogEntry::Event(modality_probe::EventId::new(23).unwrap());
let event_baz = EventLogEntry::Event(modality_probe::EventId::new(29).unwrap());
const NUM_MESSAGES_FROM_A: usize = 11;
let (network_done_sender, network_done_receiver) = crossbeam::bounded(0);
net.add_process(
"a",
vec!["b"],
make_message_broadcaster_proc(
"a",
probe_a_id,
NUM_MESSAGES_FROM_A,
server_addr,
Some(event_foo),
),
);
net.add_process(
"b",
vec!["c"],
make_message_relay_proc("b", probe_b_id, NUM_MESSAGES_FROM_A, None, Some(event_bar)),
);
net.add_process(
"c",
vec![],
make_message_sink_proc(
probe_c_id,
NUM_MESSAGES_FROM_A,
SendLogReportEveryFewMessages {
n_messages: 3,
collector_addr: server_addr,
},
Some(event_baz),
network_done_sender,
),
);
net.start();
thread::yield_now();
assert_eq!(Ok(()), network_done_receiver.recv());
// Thanks, UDP
std::thread::sleep(std::time::Duration::from_millis(200));
shutdown_sender.shutdown();
assert_eq!(Ok(ServerState::Shutdown), server_state_receiver.recv());
h.join().expect("Couldn't join server handler thread");
let mut file_reader =
std::fs::File::open(&output_file_path).expect("Could not open output file for reading");
let found_log_entries = json::read_log_entries(&mut file_reader)
.expect("Could not read output file as json log entries");
        assert!(!found_log_entries.is_empty());
let expected_direct_probe_ids: HashSet<_> =
[probe_a_id, probe_c_id].iter().copied().collect();
let built_in_event_ids: HashSet<_> =
modality_probe::EventId::INTERNAL_EVENTS.iter().collect();
for e in found_log_entries {
assert_eq!(session_id, e.session_id);
assert!(expected_direct_probe_ids.contains(&e.probe_id));
match e.data {
LogEntryData::Event(event) => {
// Event bar is logged only on b, and thus lost
if EventLogEntry::Event(event) == event_bar {
panic!("How the heck did bar get over there?");
}
if e.probe_id.get_raw() == probe_a_id.get_raw() {
// Process A should only be writing about event foo or the probe internal events
assert!(
EventLogEntry::Event(event) == event_foo
|| built_in_event_ids.contains(&event)
);
} else if e.probe_id.get_raw() == probe_c_id.get_raw() {
// Process C should only be writing about event baz or the probe internals events
assert!(
EventLogEntry::Event(event) == event_baz
|| built_in_event_ids.contains(&event),
"unexpected event for entry: {:?}",
e
);
}
}
LogEntryData::EventWithPayload(_, _) => (),
LogEntryData::FrontierClock(lc) => {
if e.probe_id == probe_a_id {
// Process A should only know about itself, since it doesn't receive history from anyone else
assert_eq!(lc.id, probe_a_id);
} else if e.probe_id == probe_c_id {
// Process C should have clocks for itself and its direct precursor, B
assert!(lc.id == probe_c_id || lc.id == probe_b_id);
}
}
LogEntryData::TraceClock(lc) => {
if e.probe_id == probe_a_id {
assert_eq!(lc.id, probe_a_id);
} else if e.probe_id == probe_c_id {
assert!(lc.id == probe_c_id || lc.id == probe_b_id);
}
}
LogEntryData::WallClockTime(_) => (),
LogEntryData::EventWithTime(_, _) => (),
LogEntryData::EventWithPayloadWithTime(_, _, _) => (),
LogEntryData::TraceClockWithTime(_, _) => (),
}
}
}
#[test]
fn linear_pair_graph() {
let addrs = find_usable_addrs(1);
let server_addr = addrs[0];
let (shutdown_sender, shutdown_receiver) = ShutdownSignalSender::new(server_addr);
let (server_state_sender, server_state_receiver) = crossbeam::bounded(0);
let session_id = gen_session_id().into();
let f = tempfile::NamedTempFile::new().expect("Could not make temp file");
let output_file_path = PathBuf::from(f.path());
let config = Config {
addr: server_addr,
session_id,
output_file: output_file_path.clone(),
};
let h = thread::spawn(move || {
let mut file = std::fs::OpenOptions::new()
.append(true)
.create(true)
.open(config.output_file)
.expect("Could not open file for writing");
let socket = UdpSocket::bind(config.addr).expect("Could not bind to server socket");
server_state_sender
.send(ServerState::Started)
.expect("Could not send status update");
start_receiving_from_socket(socket, config.session_id, &mut file, shutdown_receiver);
let _ = server_state_sender.send(ServerState::Shutdown);
});
thread::yield_now();
assert_eq!(Ok(ServerState::Started), server_state_receiver.recv());
let mut net = proc_graph::Network::new();
let probe_a_id = modality_probe::ProbeId::new(31).unwrap();
let probe_b_id = modality_probe::ProbeId::new(41).unwrap();
let event_foo = EventLogEntry::Event(modality_probe::EventId::new(7).unwrap());
let event_bar = EventLogEntry::Event(modality_probe::EventId::new(23).unwrap());
const NUM_MESSAGES_FROM_A: usize = 11;
let (network_done_sender, network_done_receiver) = crossbeam::bounded(0);
net.add_process(
"a",
vec!["b"],
make_message_broadcaster_proc(
"a",
probe_a_id,
NUM_MESSAGES_FROM_A,
server_addr,
Some(event_foo),
),
);
net.add_process(
"b",
vec![],
make_message_sink_proc(
probe_b_id,
NUM_MESSAGES_FROM_A,
SendLogReportEveryFewMessages {
n_messages: 3,
collector_addr: server_addr,
},
Some(event_bar),
network_done_sender,
),
);
net.start();
thread::yield_now();
assert_eq!(Ok(()), network_done_receiver.recv());
// Thanks, UDP
std::thread::sleep(std::time::Duration::from_millis(200));
shutdown_sender.shutdown();
assert_eq!(Ok(ServerState::Shutdown), server_state_receiver.recv());
h.join().expect("Couldn't join server handler thread");
let mut file_reader =
std::fs::File::open(&output_file_path).expect("Could not open output file for reading");
let found_log_entries = json::read_log_entries(&mut file_reader)
.expect("Could not read output file as json log entries");
assert!(!found_log_entries.is_empty());
let expected_probe_ids: HashSet<_> = [probe_a_id, probe_b_id].iter().copied().collect();
let built_in_event_ids: HashSet<_> = modality_probe::EventId::INTERNAL_EVENTS
.iter()
.map(|id| id.get_raw())
.collect();
for e in found_log_entries {
assert_eq!(session_id, e.session_id);
assert!(expected_probe_ids.contains(&e.probe_id));
match e.data {
LogEntryData::Event(event) => {
if e.probe_id == probe_a_id {
// Process A should only be writing about event foo or the probe internal events
assert!(
EventLogEntry::Event(event) == event_foo
|| built_in_event_ids.contains(&event.get_raw())
);
} else if e.probe_id == probe_b_id {
// Process B should only be writing about event bar or the probe internal events
assert!(
EventLogEntry::Event(event) == event_bar
|| built_in_event_ids.contains(&event.get_raw()),
"unexpected event for entry: {:?}",
e
);
}
}
LogEntryData::EventWithPayload(_, _) => (),
LogEntryData::FrontierClock(lc) => {
if e.probe_id == probe_a_id {
// Process A should only know about itself, since it doesn't receive history from anyone else
assert_eq!(lc.id, probe_a_id);
} else {
// Process B should have clocks for both process's probe ids
assert!(expected_probe_ids.contains(&lc.id));
}
}
LogEntryData::TraceClock(lc) => {
if e.probe_id == probe_a_id {
assert_eq!(lc.id, probe_a_id);
} else {
assert!(expected_probe_ids.contains(&lc.id));
}
}
LogEntryData::WallClockTime(_) => (),
LogEntryData::EventWithTime(_, _) => (),
LogEntryData::EventWithPayloadWithTime(_, _, _) => (),
LogEntryData::TraceClockWithTime(_, _) => (),
}
}
}
#[test]
fn linear_pair_graph_with_payload() {
let addrs = find_usable_addrs(1);
let server_addr = addrs[0];
let (shutdown_sender, shutdown_receiver) = ShutdownSignalSender::new(server_addr);
let (server_state_sender, server_state_receiver) = crossbeam::bounded(0);
let session_id = gen_session_id().into();
let f = tempfile::NamedTempFile::new().expect("Could not make temp file");
let output_file_path = PathBuf::from(f.path());
let config = Config {
addr: server_addr,
session_id,
output_file: output_file_path.clone(),
};
let h = thread::spawn(move || {
let mut file = std::fs::OpenOptions::new()
.append(true)
.create(true)
.open(config.output_file)
.expect("Could not open file for writing");
let socket = UdpSocket::bind(config.addr).expect("Could not bind to server socket");
server_state_sender
.send(ServerState::Started)
.expect("Could not send status update");
start_receiving_from_socket(socket, config.session_id, &mut file, shutdown_receiver);
let _ = server_state_sender.send(ServerState::Shutdown);
});
thread::yield_now();
assert_eq!(Ok(ServerState::Started), server_state_receiver.recv());
let mut net = proc_graph::Network::new();
let probe_a_id = modality_probe::ProbeId::new(31).unwrap();
let probe_b_id = modality_probe::ProbeId::new(41).unwrap();
let foo_payload = 777;
let foo_id = modality_probe::EventId::new(7).unwrap();
let event_foo = EventLogEntry::EventWithPayload(foo_id, foo_payload);
let bar_payload = 490;
let bar_id = modality_probe::EventId::new(23).unwrap();
let event_bar = EventLogEntry::EventWithPayload(bar_id, bar_payload);
const NUM_MESSAGES_FROM_A: usize = 11;
let (network_done_sender, network_done_receiver) = crossbeam::bounded(0);
net.add_process(
"a",
vec!["b"],
make_message_broadcaster_proc(
"a",
probe_a_id,
NUM_MESSAGES_FROM_A,
server_addr,
Some(event_foo),
),
);
net.add_process(
"b",
vec![],
make_message_sink_proc(
probe_b_id,
NUM_MESSAGES_FROM_A,
SendLogReportEveryFewMessages {
n_messages: 3,
collector_addr: server_addr,
},
Some(event_bar),
network_done_sender,
),
);
net.start();
thread::yield_now();
assert_eq!(Ok(()), network_done_receiver.recv());
// Thanks, UDP
std::thread::sleep(std::time::Duration::from_millis(200));
shutdown_sender.shutdown();
assert_eq!(Ok(ServerState::Shutdown), server_state_receiver.recv());
h.join().expect("Couldn't join server handler thread");
let mut file_reader =
std::fs::File::open(&output_file_path).expect("Could not open output file for reading");
let found_log_entries = json::read_log_entries(&mut file_reader)
.expect("Could not read output file as json log entries");
assert!(!found_log_entries.is_empty());
let expected_probe_ids: HashSet<_> = [probe_a_id, probe_b_id].iter().copied().collect();
for e in found_log_entries {
assert_eq!(session_id, e.session_id);
assert!(expected_probe_ids.contains(&e.probe_id));
match e.data {
LogEntryData::Event(_) => (),
LogEntryData::EventWithPayload(event, payload) => {
if event == foo_id {
assert_eq!(EventLogEntry::EventWithPayload(event, payload), event_foo);
} else if event == bar_id {
assert_eq!(EventLogEntry::EventWithPayload(event, payload), event_bar);
} else if event != modality_probe::EventId::EVENT_PROBE_INITIALIZED {
// Any other event here is unexpected; note that the
// model implementation of EventId doesn't OR out the
// marker bits on read.
panic!("got unexpected event: {:?}", event);
}
}
LogEntryData::FrontierClock(lc) => {
if e.probe_id == probe_a_id {
// Process A should only know about itself, since it doesn't receive history from anyone else
assert_eq!(lc.id, probe_a_id);
} else {
// Process B should have clocks for both process's probe ids
assert!(expected_probe_ids.contains(&lc.id));
}
}
LogEntryData::TraceClock(lc) => {
if e.probe_id == probe_a_id {
assert_eq!(lc.id, probe_a_id);
} else {
assert!(expected_probe_ids.contains(&lc.id));
}
}
LogEntryData::WallClockTime(_) => (),
LogEntryData::EventWithTime(_, _) => (),
LogEntryData::EventWithPayloadWithTime(_, _, _) => (),
LogEntryData::TraceClockWithTime(_, _) => (),
}
}
}
fn make_message_broadcaster_proc(
proc_name: &'static str,
probe_id: modality_probe::ProbeId,
n_messages: usize,
collector_addr: SocketAddr,
per_iteration_event: Option<EventLogEntry>,
) -> impl Fn(
HashMap<String, std::sync::mpsc::Sender<(String, Vec<u8>)>>,
std::sync::mpsc::Receiver<(String, Vec<u8>)>,
) + Send
+ 'static {
move |id_to_sender, _receiver| {
let mut probe_storage = vec![MaybeUninit::new(0u8); PROBE_STORAGE_BYTES_SIZE];
let mut probe = modality_probe::ModalityProbe::new_with_storage(
&mut probe_storage,
probe_id,
NanosecondResolution::UNSPECIFIED,
WallClockId::local_only(),
RestartCounterProvider::NoRestartTracking,
)
.expect("Could not make probe");
let mut causal_history_blob = vec![0u8; SNAPSHOT_BYTES_SIZE];
for _ in 0..n_messages {
match per_iteration_event {
Some(EventLogEntry::Event(e)) => probe.record_event(e),
Some(EventLogEntry::EventWithPayload(e, payload)) => {
probe.record_event_with_payload(e, payload)
}
_ => (),
}
let causal_history_bytes = probe
.produce_snapshot_bytes(&mut causal_history_blob)
.expect("Could not write history to share with other in-system member");
for destination in id_to_sender.values() {
let history_copy = Vec::from(&causal_history_blob[..causal_history_bytes]);
destination
.send((proc_name.to_string(), history_copy))
.expect("Could not send message to other process");
}
}
let mut log_report_storage = vec![0u8; LOG_REPORT_BYTES_SIZE];
let socket =
UdpSocket::bind(OS_PICK_ADDR_HINT).expect("Could not bind to client socket");
let log_report_bytes = probe
.report(&mut log_report_storage)
.expect("Could not write log report in broadcaster");
if let Some(log_report_bytes) = log_report_bytes {
socket
.send_to(
&log_report_storage[..log_report_bytes.get()],
collector_addr,
)
.expect("Could not send log report to server");
}
}
}
#[derive(Clone, Copy)]
struct SendLogReportEveryFewMessages {
n_messages: usize,
collector_addr: SocketAddr,
}
fn make_message_relay_proc(
proc_name: &'static str,
probe_id: modality_probe::ProbeId,
stop_relaying_after_receiving_n_messages: usize,
send_log_report_every_n_messages: Option<SendLogReportEveryFewMessages>,
per_iteration_event: Option<EventLogEntry>,
) -> impl Fn(
HashMap<String, std::sync::mpsc::Sender<(String, Vec<u8>)>>,
std::sync::mpsc::Receiver<(String, Vec<u8>)>,
) + Send
+ 'static {
move |id_to_sender, receiver| {
let mut probe_storage = vec![MaybeUninit::new(0u8); PROBE_STORAGE_BYTES_SIZE];
let mut probe = modality_probe::ModalityProbe::new_with_storage(
&mut probe_storage,
probe_id,
NanosecondResolution::UNSPECIFIED,
WallClockId::local_only(),
RestartCounterProvider::NoRestartTracking,
)
.expect("Could not make probe");
let socket =
UdpSocket::bind(OS_PICK_ADDR_HINT).expect("Could not bind to client socket");
let mut log_report_storage = vec![0u8; LOG_REPORT_BYTES_SIZE];
let mut causal_history_blob = vec![0u8; SNAPSHOT_BYTES_SIZE];
let mut messages_received = 0;
loop {
let (_msg_source, message) = match receiver.recv() {
Ok(m) => m,
Err(std::sync::mpsc::RecvError) => {
panic!("Received on a channel with no senders!")
}
};
match per_iteration_event {
Some(EventLogEntry::Event(e)) => probe.record_event(e),
Some(EventLogEntry::EventWithPayload(e, payload)) => {
probe.record_event_with_payload(e, payload)
}
_ => (),
}
probe
.merge_snapshot_bytes(&message)
.expect("Could not merge in history");
if messages_received > stop_relaying_after_receiving_n_messages {
continue;
}
let causal_history_bytes = probe
.produce_snapshot_bytes(&mut causal_history_blob)
.expect("Could not write history to share with other in-system member");
for destination in id_to_sender.values() {
let history_copy = Vec::from(&causal_history_blob[..causal_history_bytes]);
destination
.send((proc_name.to_string(), history_copy))
.expect("Could not send message to other process");
}
if let Some(SendLogReportEveryFewMessages {
n_messages,
collector_addr,
}) = send_log_report_every_n_messages
{
if messages_received % n_messages == 0 {
let log_report_bytes = probe
.report(&mut log_report_storage)
.expect("Could not write log report in relayer");
if let Some(log_report_bytes) = log_report_bytes {
socket
.send_to(
&log_report_storage[..log_report_bytes.get()],
collector_addr,
)
.expect("Could not send log report to server");
}
}
}
messages_received += 1;
}
}
}
fn make_message_sink_proc(
probe_id: modality_probe::ProbeId,
stop_after_receiving_n_messages: usize,
send_log_report_every_n_messages: SendLogReportEveryFewMessages,
per_iteration_event: Option<EventLogEntry>,
stopped_sender: crossbeam::Sender<()>,
) -> impl Fn(
HashMap<String, std::sync::mpsc::Sender<(String, Vec<u8>)>>,
std::sync::mpsc::Receiver<(String, Vec<u8>)>,
) + Send
+ 'static {
move |_id_to_sender, receiver| {
let mut probe_storage = vec![MaybeUninit::new(0u8); PROBE_STORAGE_BYTES_SIZE];
let mut probe = modality_probe::ModalityProbe::new_with_storage(
&mut probe_storage,
probe_id,
NanosecondResolution::UNSPECIFIED,
WallClockId::local_only(),
RestartCounterProvider::NoRestartTracking,
)
.expect("Could not make probe");
let socket =
UdpSocket::bind(OS_PICK_ADDR_HINT).expect("Could not bind to client socket");
let mut log_report_storage = vec![0u8; LOG_REPORT_BYTES_SIZE];
let mut messages_received = 0;
while messages_received < stop_after_receiving_n_messages {
let (_msg_source, message) = match receiver.recv() {
Ok(m) => m,
Err(std::sync::mpsc::RecvError) => {
panic!("Received on a channel with no senders!")
}
};
probe
.merge_snapshot_bytes(&message)
.expect("Could not merge in history");
match per_iteration_event {
Some(EventLogEntry::Event(e)) => probe.record_event(e),
Some(EventLogEntry::EventWithPayload(e, payload)) => {
probe.record_event_with_payload(e, payload)
}
_ => (),
}
if messages_received % send_log_report_every_n_messages.n_messages == 0 {
let log_report_bytes = probe
.report(&mut log_report_storage)
.expect("Could not write log report in sink");
if let Some(log_report_bytes) = log_report_bytes {
socket
.send_to(
&log_report_storage[..log_report_bytes.get()],
send_log_report_every_n_messages.collector_addr,
)
.expect("Could not send log report to server");
}
}
messages_received += 1;
}
stopped_sender
.send(())
.expect("Could not inform outside world the process is done");
}
}
}
use std::path::Path;
#[derive(Debug)]
pub struct Config {
pub pidfile: &'static Path,
pub workdir: &'static Path,
pub filesdir: &'static Path,
pub daemonize: bool,
pub hz: u32,
pub bind: Vec<String>,
pub port: u16,
pub tcp_keepalive: u32,
pub tcp_backlog: i32,
pub timeout: u64,
pub unixsocket: Option<String>,
pub unixsocketperm: u32,
pub syslog_enabled: bool,
pub syslog_ident: String,
pub syslog_facility: String,
}
//#[derive(Debug)]
//pub enum ConfigError {
// InvalidFormat,
// InvalidParameter,
// IOError(IOError),
// FileNotFound,
//}
impl Config {
pub fn default(port: u16) -> Config {
Config {
pidfile: Path::new("/var/run/irbis/ifsd.pid"),
workdir: Path::new("/srv/irbisfs/content"),
filesdir: Path::new("/srv/irbisfs/content/files"),
daemonize: false,
hz: 10,
// FIXME the address list in the configuration should already be in a validated form
bind: vec![],
// FIXME the port should match each bind interface individually
port,
tcp_keepalive: 0,
tcp_backlog: 511,
timeout: 0,
unixsocket: None,
unixsocketperm: 0o700, // octal mode (owner rwx only); a plain `0700` literal would be decimal 700
syslog_enabled: false,
syslog_ident: "ifsd".to_owned(),
syslog_facility: "local0".to_owned(),
}
}
pub fn new() -> Config {
Self::default(1313)
}
// FIXME no fallback stubs "in case nothing is set"; the address list should be built directly in the configuration
pub fn addresses(&self) -> Vec<(String, u16)> {
if self.bind.is_empty() {
vec![("127.0.0.1".to_owned(), self.port)]
} else {
self.bind.iter().map(|s| (s.clone(), self.port)).collect::<Vec<_>>()
}
}
}
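// A minimal usage sketch of the configuration above (values hypothetical):
//
//     let config = Config::default(1313);
//     for (addr, port) in config.addresses() {
//         // With an empty `bind` list this yields ("127.0.0.1", 1313).
//         println!("listening on {}:{}", addr, port);
//     }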
use ckb_chain::chain::{ChainController, ChainService};
use ckb_chain_spec::consensus::{Consensus, ProposalWindow};
use ckb_crypto::secp::Privkey;
use ckb_dao::DaoCalculator;
use ckb_dao_utils::genesis_dao_data;
use ckb_notify::NotifyService;
use ckb_shared::{
shared::{Shared, SharedBuilder},
Snapshot,
};
use ckb_store::ChainStore;
use ckb_system_scripts::BUNDLED_CELL;
use ckb_test_chain_utils::always_success_cell;
use ckb_traits::chain_provider::ChainProvider;
use ckb_types::{
bytes::Bytes,
core::{
capacity_bytes,
cell::{resolve_transaction, OverlayCellProvider, TransactionsProvider},
BlockBuilder, BlockView, Capacity, HeaderView, ScriptHashType, TransactionBuilder,
TransactionView,
},
h160, h256,
packed::{CellDep, CellInput, CellOutput, OutPoint, ProposalShortId, Script},
prelude::*,
H160, H256, U256,
};
use lazy_static::lazy_static;
use rand::random;
use std::collections::HashSet;
#[derive(Default)]
pub struct Chains(pub Vec<(ChainController, Shared)>);
impl Chains {
pub fn push(&mut self, chain: (ChainController, Shared)) {
self.0.push(chain);
}
}
pub fn new_always_success_chain(txs_size: usize, chains_num: usize) -> Chains {
let (_, _, always_success_script) = always_success_cell();
let tx = create_always_success_tx();
let dao = genesis_dao_data(&tx).unwrap();
// create genesis block with N txs
let transactions: Vec<TransactionView> = (0..txs_size)
.map(|i| {
let data = Bytes::from(i.to_le_bytes().to_vec());
TransactionBuilder::default()
.input(CellInput::new(OutPoint::null(), 0))
.output(
CellOutput::new_builder()
.capacity(capacity_bytes!(50_000).pack())
.lock(always_success_script.clone())
.build(),
)
.output_data(data.pack())
.build()
})
.collect();
let genesis_block = BlockBuilder::default()
.dao(dao.pack())
.difficulty(U256::from(1000u64).pack())
.transaction(tx)
.transactions(transactions)
.build();
let mut consensus = Consensus::default()
.set_cellbase_maturity(0)
.set_genesis_block(genesis_block);
consensus.tx_proposal_window = ProposalWindow(1, 10);
let mut chains = Chains::default();
for _ in 0..chains_num {
let (shared, table) = SharedBuilder::default()
.consensus(consensus.clone())
.build()
.unwrap();
let notify = NotifyService::default().start::<&str>(None);
let chain_service = ChainService::new(shared.clone(), table, notify);
chains.push((chain_service.start::<&str>(None), shared));
}
chains
}
pub fn create_always_success_tx() -> TransactionView {
let (ref always_success_cell, ref always_success_cell_data, ref script) = always_success_cell();
TransactionBuilder::default()
.witness(script.clone().into_witness())
.input(CellInput::new(OutPoint::null(), 0))
.output(always_success_cell.clone())
.output_data(always_success_cell_data.pack())
.build()
}
pub fn create_always_success_cellbase(shared: &Shared, parent: &HeaderView) -> TransactionView {
let (_, _, always_success_script) = always_success_cell();
let capacity = calculate_reward(shared, parent);
TransactionBuilder::default()
.input(CellInput::new_cellbase_input(parent.number() + 1))
.output(
CellOutput::new_builder()
.capacity(capacity.pack())
.lock(always_success_script.clone())
.build(),
)
.output_data(Bytes::new().pack())
.witness(always_success_script.clone().into_witness())
.build()
}
pub fn gen_always_success_block(
blocks: &mut Vec<BlockView>,
p_block: &BlockView,
shared: &Shared,
) -> BlockView {
let tx = create_always_success_tx();
let always_success_out_point = OutPoint::new(tx.hash().unpack(), 0);
let (_, _, always_success_script) = always_success_cell();
let (number, timestamp, difficulty) = (
p_block.header().number() + 1,
p_block.header().timestamp() + 10000,
p_block.header().difficulty() + U256::from(1u64),
);
let cellbase = create_always_success_cellbase(shared, &p_block.header());
// Spend the txs from block n-2 and propose the txs from block n-1
let transactions: Vec<TransactionView> = if blocks.len() > 1 {
let pp_block = shared
.store()
.get_block(&p_block.data().header().raw().parent_hash())
.expect("gen_block get pp_block");
pp_block
.transactions()
.iter()
.skip(1)
.map(|tx| {
create_transaction(
&tx.hash().unpack(),
always_success_script.clone(),
always_success_out_point.clone(),
)
})
.collect()
} else {
vec![]
};
let proposals: Vec<ProposalShortId> = p_block
.transactions()
.iter()
.skip(1)
.map(|tx| {
create_transaction(
&tx.hash().unpack(),
always_success_script.clone(),
always_success_out_point.clone(),
)
.proposal_short_id()
})
.collect();
let mut txs_to_resolve = vec![cellbase.clone()];
txs_to_resolve.extend_from_slice(&transactions);
let dao = dao_data(shared, &p_block.header(), &txs_to_resolve);
let block = BlockBuilder::default()
.transaction(cellbase)
.transactions(transactions)
.proposals(proposals)
.parent_hash(p_block.hash())
.number(number.pack())
.timestamp(timestamp.pack())
.difficulty(difficulty.pack())
.nonce(random::<u64>().pack())
.dao(dao.pack())
.build();
blocks.push(block.clone());
block
}
const PRIVKEY: H256 = h256!("0xb2b3324cece882bca684eaf202667bb56ed8e8c2fd4b4dc71f615ebd6d9055a5");
const PUBKEY_HASH: H160 = h160!("0x779e5930892a0a9bf2fedfe048f685466c7d0396");
lazy_static! {
static ref SECP_DATA_CELL: (CellOutput, Bytes) = {
let raw_data = BUNDLED_CELL
.get("specs/cells/secp256k1_data")
.expect("load secp256k1_blake160_sighash_all");
let data: Bytes = raw_data[..].into();
let cell = CellOutput::new_builder()
.capacity(Capacity::bytes(data.len()).unwrap().pack())
.build();
(cell, data)
};
static ref SECP_CELL: (CellOutput, Bytes, Script) = {
let raw_data = BUNDLED_CELL
.get("specs/cells/secp256k1_blake160_sighash_all")
.expect("load secp256k1_blake160_sighash_all");
let data: Bytes = raw_data[..].into();
let cell = CellOutput::new_builder()
.capacity(Capacity::bytes(data.len()).unwrap().pack())
.build();
let script = Script::new_builder()
.code_hash(CellOutput::calc_data_hash(&data).pack())
.args(vec![Bytes::from(PUBKEY_HASH.as_bytes()).pack()].pack())
.hash_type(ScriptHashType::Data.pack())
.build();
(cell, data, script)
};
}
pub fn secp_cell() -> &'static (CellOutput, Bytes, Script) {
&SECP_CELL
}
pub fn secp_data_cell() -> &'static (CellOutput, Bytes) {
&SECP_DATA_CELL
}
pub fn create_secp_tx() -> TransactionView {
let (ref secp_data_cell, ref secp_data_cell_data) = secp_data_cell();
let (ref secp_cell, ref secp_cell_data, ref script) = secp_cell();
let outputs = vec![secp_data_cell.clone(), secp_cell.clone()];
let outputs_data = vec![secp_data_cell_data.pack(), secp_cell_data.pack()];
TransactionBuilder::default()
.witness(script.clone().into_witness())
.input(CellInput::new(OutPoint::null(), 0))
.outputs(outputs)
.outputs_data(outputs_data)
.build()
}
pub fn new_secp_chain(txs_size: usize, chains_num: usize) -> Chains {
let (_, _, secp_script) = secp_cell();
let tx = create_secp_tx();
let dao = genesis_dao_data(&tx).unwrap();
// create genesis block with N txs
let transactions: Vec<TransactionView> = (0..txs_size)
.map(|i| {
let data = Bytes::from(i.to_le_bytes().to_vec());
let output = CellOutput::new_builder()
.capacity(capacity_bytes!(50_000).pack())
.lock(secp_script.clone())
.build();
TransactionBuilder::default()
.input(CellInput::new(OutPoint::null(), 0))
.output(output.clone())
.output(output)
.output_data(data.pack())
.output_data(data.pack())
.build()
})
.collect();
let genesis_block = BlockBuilder::default()
.difficulty(U256::from(1000u64).pack())
.dao(dao.pack())
.transaction(tx)
.transactions(transactions)
.build();
let mut consensus = Consensus::default()
.set_cellbase_maturity(0)
.set_genesis_block(genesis_block);
consensus.tx_proposal_window = ProposalWindow(1, 10);
let mut chains = Chains::default();
for _ in 0..chains_num {
let (shared, table) = SharedBuilder::default()
.consensus(consensus.clone())
.build()
.unwrap();
let notify = NotifyService::default().start::<&str>(None);
let chain_service = ChainService::new(shared.clone(), table, notify);
chains.push((chain_service.start::<&str>(None), shared));
}
chains
}
pub fn create_secp_cellbase(shared: &Shared, parent: &HeaderView) -> TransactionView {
let (_, _, secp_script) = secp_cell();
let capacity = calculate_reward(shared, parent);
TransactionBuilder::default()
.input(CellInput::new_cellbase_input(parent.number() + 1))
.output(
CellOutput::new_builder()
.capacity(capacity.pack())
.lock(secp_script.clone())
.build(),
)
.output_data(Bytes::new().pack())
.witness(secp_script.clone().into_witness())
.build()
}
pub fn gen_secp_block(
blocks: &mut Vec<BlockView>,
p_block: &BlockView,
shared: &Shared,
) -> BlockView {
let tx = create_secp_tx();
let secp_cell_deps = vec![
CellDep::new_builder()
.out_point(OutPoint::new(tx.hash().unpack(), 0))
.build(),
CellDep::new_builder()
.out_point(OutPoint::new(tx.hash().unpack(), 1))
.build(),
];
let (_, _, secp_script) = secp_cell();
let (number, timestamp, difficulty) = (
p_block.header().number() + 1,
p_block.header().timestamp() + 10000,
p_block.header().difficulty() + U256::from(1u64),
);
let cellbase = create_secp_cellbase(shared, &p_block.header());
// Spend the txs from block n-2 and propose the txs from block n-1
let transactions: Vec<TransactionView> = if blocks.len() > 1 {
let pp_block = shared
.store()
.get_block(&p_block.data().header().raw().parent_hash())
.expect("gen_block get pp_block");
pp_block
.transactions()
.iter()
.skip(1)
.map(|tx| {
create_2out_transaction(
tx.output_pts(),
secp_script.clone(),
secp_cell_deps.clone(),
)
})
.collect()
} else {
vec![]
};
let proposals: Vec<ProposalShortId> = p_block
.transactions()
.iter()
.skip(1)
.map(|tx| {
create_2out_transaction(tx.output_pts(), secp_script.clone(), secp_cell_deps.clone())
.proposal_short_id()
})
.collect();
let mut txs_to_resolve = vec![cellbase.clone()];
txs_to_resolve.extend_from_slice(&transactions);
let dao = dao_data(shared, &p_block.header(), &txs_to_resolve);
let block = BlockBuilder::default()
.transaction(cellbase)
.transactions(transactions)
.proposals(proposals)
.parent_hash(p_block.hash())
.number(number.pack())
.timestamp(timestamp.pack())
.difficulty(difficulty.pack())
.nonce(random::<u64>().pack())
.dao(dao.pack())
.build();
blocks.push(block.clone());
block
}
fn create_transaction(parent_hash: &H256, lock: Script, dep: OutPoint) -> TransactionView {
let data: Bytes = (0..255).collect();
TransactionBuilder::default()
.output(
CellOutput::new_builder()
.capacity(capacity_bytes!(50_000).pack())
.lock(lock.clone())
.build(),
)
.output_data(data.pack())
.input(CellInput::new(OutPoint::new(parent_hash.to_owned(), 0), 0))
.cell_dep(CellDep::new_builder().out_point(dep).build())
.build()
}
fn create_2out_transaction(
inputs: Vec<OutPoint>,
lock: Script,
cell_deps: Vec<CellDep>,
) -> TransactionView {
let data: Bytes = (0..255).collect();
let cell_inputs = inputs.into_iter().map(|pts| CellInput::new(pts, 0));
let cell_output = CellOutput::new_builder()
.capacity(capacity_bytes!(50_000).pack())
.lock(lock.clone())
.build();
let raw = TransactionBuilder::default()
.output(cell_output.clone())
.output(cell_output)
.output_data(data.pack())
.output_data(data.pack())
.inputs(cell_inputs)
.cell_deps(cell_deps)
.build();
let privkey: Privkey = PRIVKEY.into();
let mut blake2b = ckb_hash::new_blake2b();
let mut message = [0u8; 32];
blake2b.update(&raw.hash().raw_data()[..]);
blake2b.finalize(&mut message);
let message = H256::from(message);
let witness: Bytes = privkey
.sign_recoverable(&message)
.expect("sign tx")
.serialize()
.into();
raw.as_advanced_builder()
.witness(vec![witness.pack()].pack())
.witness(vec![witness.pack()].pack())
.build()
}
pub fn dao_data(shared: &Shared, parent: &HeaderView, txs: &[TransactionView]) -> Bytes {
let mut seen_inputs = HashSet::new();
// In case of resolving errors, we just output a dummy DAO field,
// since those should only arise in the cases where we are testing
// invalid blocks.
let transactions_provider = TransactionsProvider::new(txs.iter());
let snapshot: &Snapshot = &shared.snapshot();
let overlay_cell_provider = OverlayCellProvider::new(&transactions_provider, snapshot);
let rtxs = txs.iter().try_fold(vec![], |mut rtxs, tx| {
let rtx = resolve_transaction(tx, &mut seen_inputs, &overlay_cell_provider, snapshot);
match rtx {
Ok(rtx) => {
rtxs.push(rtx);
Ok(rtxs)
}
Err(e) => Err(e),
}
});
let rtxs = rtxs.expect("dao_data resolve_transaction");
let calculator = DaoCalculator::new(shared.consensus(), shared.store());
calculator
.dao_field(&rtxs, &parent)
.expect("calculator dao_field")
}
pub(crate) fn calculate_reward(shared: &Shared, parent: &HeaderView) -> Capacity {
let number = parent.number() + 1;
let target_number = shared.consensus().finalize_target(number).unwrap();
let target = shared
.store()
.get_ancestor(&parent.hash(), target_number)
.expect("calculate_reward get_ancestor");
let calculator = DaoCalculator::new(shared.consensus(), shared.store());
calculator
.primary_block_reward(&target)
.expect("calculate_reward primary_block_reward")
.safe_add(calculator.secondary_block_reward(&target).unwrap())
.expect("calculate_reward safe_add")
}
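// A minimal composition sketch of the helpers above, assuming a single chain;
// accessing the genesis block through `shared.consensus()` is an assumption
// of this sketch, not something exercised elsewhere in this file:
//
//     let mut chains = new_always_success_chain(8, 1);
//     let (_chain_controller, shared) = chains.0.remove(0);
//     let mut blocks = vec![shared.consensus().genesis_block().clone()];
//     let parent = blocks.last().unwrap().clone();
//     let _block = gen_always_success_block(&mut blocks, &parent, &shared);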
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
crate::io::Directory,
anyhow::{format_err, Context, Result},
fuchsia_async::TimeoutExt,
futures::future::{join, join_all, BoxFuture},
futures::FutureExt,
moniker::{AbsoluteMonikerBase, ChildMonikerBase, PartialAbsoluteMoniker, PartialChildMoniker},
routing::component_id_index::ComponentInstanceId,
};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
static SPACER: &str = " ";
static CAPABILITY_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(1);
async fn does_url_match_query(query: &str, hub_dir: &Directory) -> bool {
let url = hub_dir.read_file("url").await.expect("Could not read component URL");
url.contains(query)
}
// Given a v2 hub directory, collect components whose component name or URL contains |query| as a
// substring. This function is recursive and will find matching CMX and CML components.
pub async fn find_components(query: String, hub_dir: Directory) -> Result<Vec<Component>> {
find_components_internal(query, String::new(), PartialAbsoluteMoniker::root(), hub_dir).await
}
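// A minimal usage sketch (the /hub-v2 namespace path is hypothetical; it just
// needs to point at the root of a v2 hub):
//
//     async fn print_matches() -> Result<()> {
//         let hub_dir = Directory::from_namespace("/hub-v2".into())?;
//         for component in find_components("stash".to_string(), hub_dir).await? {
//             println!("{}", component);
//         }
//         Ok(())
//     }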
fn find_components_internal(
query: String,
name: String,
moniker: PartialAbsoluteMoniker,
hub_dir: Directory,
) -> BoxFuture<'static, Result<Vec<Component>>> {
async move {
let mut futures = vec![];
let children_dir = hub_dir.open_dir_readable("children")?;
for child_name in children_dir.entries().await? {
let child_moniker = PartialChildMoniker::parse(&child_name)?;
let child_moniker = moniker.child(child_moniker);
let child_hub_dir = children_dir.open_dir_readable(&child_name)?;
let child_future =
find_components_internal(query.clone(), child_name, child_moniker, child_hub_dir);
futures.push(child_future);
}
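// appmgr bridges to the v1 world: its `exec/out/hub` directory serves the
// root CMX realm, so recurse into it with the CMX-specific walker.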
if name == "appmgr" {
let realm_dir = hub_dir.open_dir_readable("exec/out/hub")?;
let appmgr_future = find_cmx_realms(query.clone(), moniker.clone(), realm_dir);
futures.push(appmgr_future);
}
let results = join_all(futures).await;
let mut matching_components = vec![];
for result in results {
let mut result = result?;
matching_components.append(&mut result);
}
let should_include = moniker.to_string_without_instances().contains(&query)
|| does_url_match_query(&query, &hub_dir).await;
if should_include {
let component = Component::parse(moniker, &hub_dir).await?;
matching_components.push(component);
}
Ok(matching_components)
}
.boxed()
}
// Given a v1 realm directory, collect components whose URL matches the given |query|.
// |moniker| corresponds to the moniker of the current realm.
fn find_cmx_realms(
query: String,
moniker: PartialAbsoluteMoniker,
hub_dir: Directory,
) -> BoxFuture<'static, Result<Vec<Component>>> {
async move {
let c_dir = hub_dir.open_dir_readable("c")?;
let c_future = find_cmx_components_in_c_dir(query.clone(), moniker.clone(), c_dir);
let r_dir = hub_dir.open_dir_readable("r")?;
let r_future = find_cmx_realms_in_r_dir(query, moniker, r_dir);
let (matching_components_c, matching_components_r) = join(c_future, r_future).await;
let mut matching_components_c = matching_components_c?;
let mut matching_components_r = matching_components_r?;
matching_components_c.append(&mut matching_components_r);
Ok(matching_components_c)
}
.boxed()
}
// Given a v1 component directory, collect components whose URL matches the given |query|.
// |moniker| corresponds to the moniker of the current component.
fn find_cmx_components(
query: String,
moniker: PartialAbsoluteMoniker,
hub_dir: Directory,
) -> BoxFuture<'static, Result<Vec<Component>>> {
async move {
let mut matching_components = vec![];
// Component runners can have a `c` dir with child components
if hub_dir.exists("c").await? {
let c_dir = hub_dir.open_dir_readable("c")?;
let mut child_components =
find_cmx_components_in_c_dir(query.clone(), moniker.clone(), c_dir).await?;
matching_components.append(&mut child_components);
}
let should_include = moniker.to_string_without_instances().contains(&query)
|| does_url_match_query(&query, &hub_dir).await;
if should_include {
let component = Component::parse_cmx(moniker, hub_dir).await?;
matching_components.push(component);
}
Ok(matching_components)
}
.boxed()
}
async fn find_cmx_components_in_c_dir(
query: String,
moniker: PartialAbsoluteMoniker,
c_dir: Directory,
) -> Result<Vec<Component>> {
// Get all CMX child components
let child_component_names = c_dir.entries().await?;
let mut future_children = vec![];
for child_component_name in child_component_names {
let child_moniker = PartialChildMoniker::parse(&child_component_name)?;
let child_moniker = moniker.child(child_moniker);
let job_ids_dir = c_dir.open_dir_readable(&child_component_name)?;
let hub_dirs = open_all_job_ids(job_ids_dir).await?;
for hub_dir in hub_dirs {
let future_child = find_cmx_components(query.clone(), child_moniker.clone(), hub_dir);
future_children.push(future_child);
}
}
let results = join_all(future_children).await;
let mut flattened_components = vec![];
for result in results {
let mut components = result?;
flattened_components.append(&mut components);
}
Ok(flattened_components)
}
async fn find_cmx_realms_in_r_dir(
query: String,
moniker: PartialAbsoluteMoniker,
r_dir: Directory,
) -> Result<Vec<Component>> {
// Get all CMX child realms
let mut future_realms = vec![];
for child_realm_name in r_dir.entries().await? {
let child_moniker = PartialChildMoniker::parse(&child_realm_name)?;
let child_moniker = moniker.child(child_moniker);
let job_ids_dir = r_dir.open_dir_readable(&child_realm_name)?;
let hub_dirs = open_all_job_ids(job_ids_dir).await?;
for hub_dir in hub_dirs {
let future_realm = find_cmx_realms(query.clone(), child_moniker.clone(), hub_dir);
future_realms.push(future_realm);
}
}
let results = join_all(future_realms).await;
let mut flattened_components = vec![];
for result in results {
let mut components = result?;
flattened_components.append(&mut components);
}
Ok(flattened_components)
}
async fn open_all_job_ids(job_ids_dir: Directory) -> Result<Vec<Directory>> {
// Recurse on the job_ids
let mut dirs = vec![];
for job_id in job_ids_dir.entries().await? {
let dir = job_ids_dir.open_dir_readable(&job_id)?;
dirs.push(dir);
}
Ok(dirs)
}
// Get all entries in a capabilities directory. If there is a "svc" directory, traverse it and
// collect all protocol names as well.
async fn get_capabilities(capability_dir: Directory) -> Result<Vec<String>> {
let mut entries = capability_dir.entries().await?;
for (index, name) in entries.iter().enumerate() {
if name == "svc" {
entries.remove(index);
let svc_dir = capability_dir.open_dir_readable("svc")?;
let mut svc_entries = svc_dir.entries().await?;
entries.append(&mut svc_entries);
break;
}
}
entries.sort_unstable();
Ok(entries)
}
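// For example (illustrative only): an `out` directory containing `minfs/` and
// `svc/fuchsia.logger.LogSink` yields
//
//     vec!["fuchsia.logger.LogSink".to_string(), "minfs".to_string()]
//
// because entries under `svc` are flattened into the list and the result is
// sorted.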
/// Additional information about components that are using the ELF runner
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[derive(Debug, Eq, PartialEq)]
pub struct ElfRuntime {
pub job_id: u32,
pub process_id: Option<u32>,
pub process_start_time: Option<i64>,
pub process_start_time_utc_estimate: Option<String>,
}
impl ElfRuntime {
async fn parse(elf_dir: Directory) -> Result<Self> {
let (job_id, process_id, process_start_time, process_start_time_utc_estimate) = futures::join!(
elf_dir.read_file("job_id"),
elf_dir.read_file("process_id"),
elf_dir.read_file("process_start_time"),
elf_dir.read_file("process_start_time_utc_estimate"),
);
let job_id = job_id?.parse::<u32>().context("Job ID is not u32")?;
let process_id = Some(process_id?.parse::<u32>().context("Process ID is not u32")?);
let process_start_time =
process_start_time.ok().map(|time_string| time_string.parse::<i64>().ok()).flatten();
let process_start_time_utc_estimate = process_start_time_utc_estimate.ok();
Ok(Self { job_id, process_id, process_start_time, process_start_time_utc_estimate })
}
async fn parse_cmx(hub_dir: &Directory) -> Result<Self> {
let (job_id, process_id) =
futures::join!(hub_dir.read_file("job-id"), hub_dir.read_file("process-id"),);
let job_id = job_id?.parse::<u32>().context("Job ID is not u32")?;
let process_id = if hub_dir.exists("process-id").await? {
Some(process_id?.parse::<u32>().context("Process ID is not u32")?)
} else {
None
};
Ok(Self {
job_id,
process_id,
process_start_time: None,
process_start_time_utc_estimate: None,
})
}
}
impl std::fmt::Display for ElfRuntime {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
writeln!(f, "Job ID: {}", self.job_id)?;
if let Some(process_id) = &self.process_id {
writeln!(f, "Process ID: {}", process_id)?;
}
if let Some(ticks) = &self.process_start_time {
writeln!(f, "Process Start Time (ticks): {}", ticks)?;
} else {
writeln!(f, "Process Start Time (ticks): (not available)")?;
}
if let Some(utc_estimate) = &self.process_start_time_utc_estimate {
writeln!(f, "Process Start Time (UTC estimate): {}", utc_estimate)?;
} else {
writeln!(f, "Process Start Time (UTC estimate): (not available)")?;
}
Ok(())
}
}
/// Additional information about components that are running
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[derive(Debug, Eq, PartialEq)]
pub struct Execution {
pub elf_runtime: Option<ElfRuntime>,
pub merkle_root: Option<String>,
pub outgoing_capabilities: Option<Vec<String>>,
}
impl Execution {
async fn parse(exec_dir: Directory) -> Result<Self> {
let in_dir = exec_dir.open_dir_readable("in")?;
let merkle_root = if in_dir.exists("pkg").await? {
let pkg_dir = in_dir.open_dir_readable("pkg")?;
if pkg_dir.exists("meta").await? {
pkg_dir.read_file("meta").await.ok()
} else {
None
}
} else {
None
};
let elf_runtime = if exec_dir.exists("runtime").await? {
let runtime_dir = exec_dir.open_dir_readable("runtime")?;
// Some runners may not serve the runtime directory, so attempting to get the entries
// may fail. This is normal and should be treated as no ELF runtime.
if let Ok(true) = runtime_dir.exists("elf").await {
let elf_dir = runtime_dir.open_dir_readable("elf")?;
Some(ElfRuntime::parse(elf_dir).await?)
} else {
None
}
} else {
None
};
let outgoing_capabilities = if exec_dir.exists("out").await? {
let out_dir = exec_dir.open_dir_readable("out")?;
get_capabilities(out_dir)
.on_timeout(CAPABILITY_TIMEOUT, || {
Err(format_err!("Timeout occurred opening `out` dir"))
})
.await
.ok()
} else {
// The directory doesn't exist. This is probably because
// there is no runtime on the component.
None
};
Ok(Self { elf_runtime, merkle_root, outgoing_capabilities })
}
async fn parse_cmx(hub_dir: &Directory) -> Result<Self> {
let in_dir = hub_dir.open_dir_readable("in")?;
let merkle_root = if in_dir.exists("pkg").await? {
let pkg_dir = in_dir.open_dir_readable("pkg")?;
if pkg_dir.exists("meta").await? {
pkg_dir.read_file("meta").await.ok()
} else {
None
}
} else {
None
};
let elf_runtime = Some(ElfRuntime::parse_cmx(hub_dir).await?);
let outgoing_capabilities = if hub_dir.exists("out").await? {
let out_dir = hub_dir.open_dir_readable("out")?;
get_capabilities(out_dir)
.on_timeout(CAPABILITY_TIMEOUT, || {
Err(format_err!("Timeout occurred opening `out` dir"))
})
.await
.ok()
} else {
// The directory doesn't exist. This is probably because
// there is no runtime on the component.
None
};
Ok(Self { elf_runtime, merkle_root, outgoing_capabilities })
}
}
impl std::fmt::Display for Execution {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if let Some(runtime) = &self.elf_runtime {
write!(f, "{}", runtime)?;
}
if let Some(merkle_root) = &self.merkle_root {
writeln!(f, "Merkle root: {}", merkle_root)?;
}
if let Some(outgoing_capabilities) = &self.outgoing_capabilities {
writeln!(f, "Outgoing Capabilities ({}):", outgoing_capabilities.len())?;
for capability in outgoing_capabilities {
writeln!(f, "{}{}", SPACER, capability)?;
}
}
Ok(())
}
}
/// Additional information about components that are resolved
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
pub struct Resolved {
pub incoming_capabilities: Vec<String>,
pub exposed_capabilities: Vec<String>,
pub instance_id: Option<ComponentInstanceId>,
}
impl Resolved {
async fn parse(resolved_dir: Directory) -> Result<Self> {
let incoming_capabilities = {
let use_dir = resolved_dir.open_dir_readable("use")?;
get_capabilities(use_dir).await?
};
let exposed_capabilities = {
let expose_dir = resolved_dir.open_dir_readable("expose")?;
get_capabilities(expose_dir).await?
};
let instance_id = resolved_dir.read_file("instance_id").await.ok();
Ok(Self { incoming_capabilities, exposed_capabilities, instance_id })
}
async fn parse_cmx(hub_dir: &Directory) -> Result<Self> {
let incoming_capabilities = {
let in_dir = hub_dir.open_dir_readable("in")?;
get_capabilities(in_dir).await?
};
Ok(Self { incoming_capabilities, exposed_capabilities: vec![], instance_id: None })
}
}
impl std::fmt::Display for Resolved {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if let Some(instance_id) = &self.instance_id {
writeln!(f, "Instance ID: {}", instance_id)?;
}
writeln!(f, "Incoming Capabilities ({}):", self.incoming_capabilities.len())?;
for capability in &self.incoming_capabilities {
writeln!(f, "{}{}", SPACER, capability)?;
}
writeln!(f, "Exposed Capabilities ({}):", self.exposed_capabilities.len())?;
for capability in &self.exposed_capabilities {
writeln!(f, "{}{}", SPACER, capability)?;
}
Ok(())
}
}
/// Basic information about a component for the `show` command.
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
pub struct Component {
pub moniker: PartialAbsoluteMoniker,
pub url: String,
pub component_type: String,
pub execution: Option<Execution>,
pub resolved: Option<Resolved>,
}
impl Component {
async fn parse(moniker: PartialAbsoluteMoniker, hub_dir: &Directory) -> Result<Component> {
let resolved = if hub_dir.exists("resolved").await? {
let resolved_dir = hub_dir.open_dir_readable("resolved")?;
Some(Resolved::parse(resolved_dir).await?)
} else {
None
};
let execution = if hub_dir.exists("exec").await? {
let exec_dir = hub_dir.open_dir_readable("exec")?;
Some(Execution::parse(exec_dir).await?)
} else {
None
};
let (url, component_type) =
futures::join!(hub_dir.read_file("url"), hub_dir.read_file("component_type"),);
let url = url?;
let component_type = format!("CML {} component", component_type?);
Ok(Component { moniker, url, component_type, execution, resolved })
}
async fn parse_cmx(moniker: PartialAbsoluteMoniker, hub_dir: Directory) -> Result<Component> {
let resolved = Some(Resolved::parse_cmx(&hub_dir).await?);
let execution = Some(Execution::parse_cmx(&hub_dir).await?);
let url = hub_dir.read_file("url").await?;
let component_type = "CMX component".to_string();
Ok(Component { moniker, url, component_type, execution, resolved })
}
}
impl std::fmt::Display for Component {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
writeln!(f, "Moniker: {}", self.moniker)?;
writeln!(f, "URL: {}", self.url)?;
writeln!(f, "Type: {}", self.component_type)?;
if let Some(resolved) = &self.resolved {
writeln!(f, "Component State: Resolved")?;
write!(f, "{}", resolved)?;
} else {
writeln!(f, "Component State: Unresolved")?;
}
if let Some(execution) = &self.execution {
writeln!(f, "Execution State: Running")?;
write!(f, "{}", execution)?;
} else {
writeln!(f, "Execution State: Stopped")?;
}
Ok(())
}
}
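// Illustrative rendering (values hypothetical), following the Display impls
// above for a resolved-but-stopped component:
//
//     Moniker: /core/stash
//     URL: fuchsia-pkg://fuchsia.com/stash#meta/stash.cm
//     Type: CML static component
//     Component State: Resolved
//     Incoming Capabilities (1):
//       dev
//     Exposed Capabilities (0):
//     Execution State: Stopped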
#[cfg(test)]
mod tests {
use super::*;
use {
std::fs::{self, File},
std::io::Write,
tempfile::TempDir,
};
#[fuchsia_async::run_singlethreaded(test)]
async fn cml_find_by_name() {
let test_dir = TempDir::new_in("/tmp").unwrap();
let root = test_dir.path();
// Create the following structure
// .
// |- children
// |- stash
// |- children
// |- component_type
// |- url
// |- component_type
// |- url
fs::create_dir(root.join("children")).unwrap();
File::create(root.join("component_type")).unwrap().write_all("static".as_bytes()).unwrap();
File::create(root.join("url"))
.unwrap()
.write_all("fuchsia-boot:///#meta/root.cm".as_bytes())
.unwrap();
{
let stash = root.join("children/stash");
fs::create_dir(&stash).unwrap();
fs::create_dir(stash.join("children")).unwrap();
File::create(stash.join("component_type"))
.unwrap()
.write_all("static".as_bytes())
.unwrap();
File::create(stash.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/abcd#meta/abcd.cm".as_bytes())
.unwrap();
}
let hub_dir = Directory::from_namespace(root.to_path_buf()).unwrap();
let components = find_components("stash".to_string(), hub_dir).await.unwrap();
assert_eq!(components.len(), 1);
let component = &components[0];
assert_eq!(component.moniker, vec!["stash"].into());
assert_eq!(component.url, "fuchsia-pkg://fuchsia.com/abcd#meta/abcd.cm");
assert_eq!(component.component_type, "CML static component");
assert!(component.resolved.is_none());
assert!(component.execution.is_none());
}
#[fuchsia_async::run_singlethreaded(test)]
async fn cml_find_by_url() {
let test_dir = TempDir::new_in("/tmp").unwrap();
let root = test_dir.path();
// Create the following structure
// .
// |- children
// |- abcd
// |- children
// |- component_type
// |- url
// |- component_type
// |- url
fs::create_dir(root.join("children")).unwrap();
File::create(root.join("component_type")).unwrap().write_all("static".as_bytes()).unwrap();
File::create(root.join("url"))
.unwrap()
.write_all("fuchsia-boot:///#meta/root.cm".as_bytes())
.unwrap();
{
let stash = root.join("children/abcd");
fs::create_dir(&stash).unwrap();
fs::create_dir(stash.join("children")).unwrap();
File::create(stash.join("component_type"))
.unwrap()
.write_all("static".as_bytes())
.unwrap();
File::create(stash.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/stash#meta/stash.cm".as_bytes())
.unwrap();
}
let hub_dir = Directory::from_namespace(root.to_path_buf()).unwrap();
let components = find_components("stash".to_string(), hub_dir).await.unwrap();
assert_eq!(components.len(), 1);
let component = &components[0];
assert_eq!(component.moniker, vec!["abcd"].into());
assert_eq!(component.url, "fuchsia-pkg://fuchsia.com/stash#meta/stash.cm");
assert_eq!(component.component_type, "CML static component");
assert!(component.resolved.is_none());
assert!(component.execution.is_none());
}
#[fuchsia_async::run_singlethreaded(test)]
async fn nested_cml() {
let test_dir = TempDir::new_in("/tmp").unwrap();
let root = test_dir.path();
// Create the following structure
// .
// |- children
// |- abcd
// |- children
// |- efgh
// |- children
// |- component_type
// |- url
// |- component_type
// |- url
// |- component_type
// |- url
fs::create_dir(root.join("children")).unwrap();
File::create(root.join("component_type")).unwrap().write_all("static".as_bytes()).unwrap();
File::create(root.join("url"))
.unwrap()
.write_all("fuchsia-boot:///#meta/root.cm".as_bytes())
.unwrap();
{
let abcd = root.join("children/abcd");
fs::create_dir(&abcd).unwrap();
fs::create_dir(abcd.join("children")).unwrap();
File::create(abcd.join("component_type"))
.unwrap()
.write_all("static".as_bytes())
.unwrap();
File::create(abcd.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/abcd#meta/abcd.cm".as_bytes())
.unwrap();
{
let efgh = abcd.join("children/efgh");
fs::create_dir(&efgh).unwrap();
fs::create_dir(efgh.join("children")).unwrap();
File::create(efgh.join("component_type"))
.unwrap()
.write_all("static".as_bytes())
.unwrap();
File::create(efgh.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/efgh#meta/efgh.cm".as_bytes())
.unwrap();
}
}
let hub_dir = Directory::from_namespace(root.to_path_buf()).unwrap();
let components = find_components("efgh".to_string(), hub_dir).await.unwrap();
assert_eq!(components.len(), 1);
let component = &components[0];
assert_eq!(component.moniker, vec!["abcd", "efgh"].into());
assert_eq!(component.url, "fuchsia-pkg://fuchsia.com/efgh#meta/efgh.cm");
assert_eq!(component.component_type, "CML static component");
assert!(component.resolved.is_none());
assert!(component.execution.is_none());
}
#[fuchsia_async::run_singlethreaded(test)]
async fn multiple_cml() {
let test_dir = TempDir::new_in("/tmp").unwrap();
let root = test_dir.path();
// Create the following structure
// .
// |- children
// |- stash_1
// |- children
// |- component_type
// |- url
// |- stash_2
// |- children
// |- component_type
// |- url
// |- component_type
// |- url
fs::create_dir(root.join("children")).unwrap();
File::create(root.join("component_type")).unwrap().write_all("static".as_bytes()).unwrap();
File::create(root.join("url"))
.unwrap()
.write_all("fuchsia-boot:///#meta/root.cm".as_bytes())
.unwrap();
{
let stash_1 = root.join("children/stash_1");
fs::create_dir(&stash_1).unwrap();
fs::create_dir(stash_1.join("children")).unwrap();
File::create(stash_1.join("component_type"))
.unwrap()
.write_all("static".as_bytes())
.unwrap();
File::create(stash_1.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/abcd#meta/abcd.cm".as_bytes())
.unwrap();
}
{
let stash_2 = root.join("children/stash_2");
fs::create_dir(&stash_2).unwrap();
fs::create_dir(stash_2.join("children")).unwrap();
File::create(stash_2.join("component_type"))
.unwrap()
.write_all("static".as_bytes())
.unwrap();
File::create(stash_2.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/abcd#meta/abcd.cm".as_bytes())
.unwrap();
}
let hub_dir = Directory::from_namespace(root.to_path_buf()).unwrap();
let components = find_components("stash".to_string(), hub_dir).await.unwrap();
assert_eq!(components.len(), 2);
let component_1 = &components[0];
assert_eq!(component_1.moniker, vec!["stash_1"].into());
assert_eq!(component_1.url, "fuchsia-pkg://fuchsia.com/abcd#meta/abcd.cm");
assert_eq!(component_1.component_type, "CML static component");
assert!(component_1.resolved.is_none());
assert!(component_1.execution.is_none());
let component_2 = &components[1];
assert_eq!(component_2.moniker, vec!["stash_2"].into());
assert_eq!(component_2.url, "fuchsia-pkg://fuchsia.com/abcd#meta/abcd.cm");
assert_eq!(component_2.component_type, "CML static component");
assert!(component_2.resolved.is_none());
assert!(component_2.execution.is_none());
}
#[fuchsia_async::run_singlethreaded(test)]
async fn resolved_cml() {
let test_dir = TempDir::new_in("/tmp").unwrap();
let root = test_dir.path();
// Create the following structure
// .
// |- children
// |- component_type
// |- url
// |- resolved
// |- use
// |- dev
// |- expose
// |- minfs
// |- instance_id
fs::create_dir(root.join("children")).unwrap();
File::create(root.join("component_type")).unwrap().write_all("static".as_bytes()).unwrap();
File::create(root.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/stash#meta/stash.cm".as_bytes())
.unwrap();
fs::create_dir_all(root.join("resolved/use/dev")).unwrap();
fs::create_dir_all(root.join("resolved/expose/minfs")).unwrap();
File::create(root.join("resolved/instance_id"))
.unwrap()
.write_all("abc".as_bytes())
.unwrap();
let hub_dir = Directory::from_namespace(root.to_path_buf()).unwrap();
let components = find_components("stash".to_string(), hub_dir).await.unwrap();
assert_eq!(components.len(), 1);
let component = &components[0];
assert_eq!(component.url, "fuchsia-pkg://fuchsia.com/stash#meta/stash.cm");
assert!(component.resolved.is_some());
let resolved = component.resolved.as_ref().unwrap();
let instance_id = &resolved.instance_id;
assert_eq!(instance_id, &Some("abc".to_string()));
let incoming_capabilities = &resolved.incoming_capabilities;
assert_eq!(incoming_capabilities.len(), 1);
let incoming_capability = &incoming_capabilities[0];
assert_eq!(incoming_capability, "dev");
let exposed_capabilities = &resolved.exposed_capabilities;
assert_eq!(exposed_capabilities.len(), 1);
let exposed_capability = &exposed_capabilities[0];
assert_eq!(exposed_capability, "minfs");
assert!(component.execution.is_none());
}
#[fuchsia_async::run_singlethreaded(test)]
async fn resolved_cml_without_instance_id() {
let test_dir = TempDir::new_in("/tmp").unwrap();
let root = test_dir.path();
// Create the following structure
// .
// |- children
// |- component_type
// |- url
// |- resolved
// |- use
// |- dev
// |- expose
// |- minfs
fs::create_dir(root.join("children")).unwrap();
File::create(root.join("component_type")).unwrap().write_all("static".as_bytes()).unwrap();
File::create(root.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/stash#meta/stash.cm".as_bytes())
.unwrap();
fs::create_dir_all(root.join("resolved/use/dev")).unwrap();
fs::create_dir_all(root.join("resolved/expose/minfs")).unwrap();
let hub_dir = Directory::from_namespace(root.to_path_buf()).unwrap();
let components = find_components("stash".to_string(), hub_dir).await.unwrap();
assert_eq!(components.len(), 1);
let component = &components[0];
assert_eq!(component.url, "fuchsia-pkg://fuchsia.com/stash#meta/stash.cm");
assert!(component.resolved.is_some());
let resolved = component.resolved.as_ref().unwrap();
let instance_id = &resolved.instance_id;
assert!(instance_id.is_none());
}
#[fuchsia_async::run_singlethreaded(test)]
async fn full_execution_cml() {
let test_dir = TempDir::new_in("/tmp").unwrap();
let root = test_dir.path();
// Create the following structure
// .
// |- children
// |- component_type
// |- url
// |- exec
// |- in
// |- pkg
// |- meta
// |- out
// |- minfs
// |- runtime
// |- elf
// |- job_id
// |- process_id
// |- process_start_time
// |- process_start_time_utc_estimate
fs::create_dir(root.join("children")).unwrap();
File::create(root.join("component_type")).unwrap().write_all("static".as_bytes()).unwrap();
File::create(root.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/stash#meta/stash.cm".as_bytes())
.unwrap();
fs::create_dir_all(root.join("exec/in/pkg")).unwrap();
fs::create_dir_all(root.join("exec/out/minfs")).unwrap();
fs::create_dir_all(root.join("exec/runtime/elf")).unwrap();
File::create(root.join("exec/in/pkg/meta")).unwrap().write_all("1234".as_bytes()).unwrap();
File::create(root.join("exec/runtime/elf/job_id"))
.unwrap()
.write_all("5454".as_bytes())
.unwrap();
File::create(root.join("exec/runtime/elf/process_id"))
.unwrap()
.write_all("9898".as_bytes())
.unwrap();
File::create(root.join("exec/runtime/elf/process_start_time"))
.unwrap()
.write_all("101010101010".as_bytes())
.unwrap();
File::create(root.join("exec/runtime/elf/process_start_time_utc_estimate"))
.unwrap()
.write_all("Mon 12 Jul 2021 03:53:33 PM UTC".as_bytes())
.unwrap();
let hub_dir = Directory::from_namespace(root.to_path_buf()).unwrap();
let components = find_components("stash".to_string(), hub_dir).await.unwrap();
assert_eq!(components.len(), 1);
let component = &components[0];
assert_eq!(component.url, "fuchsia-pkg://fuchsia.com/stash#meta/stash.cm");
assert!(component.execution.is_some());
let execution = component.execution.as_ref().unwrap();
assert!(execution.elf_runtime.is_some());
let elf_runtime = execution.elf_runtime.as_ref().unwrap();
assert_eq!(elf_runtime.job_id, 5454);
let process_id = elf_runtime.process_id.unwrap();
assert_eq!(process_id, 9898);
assert!(elf_runtime.process_start_time.is_some());
let process_start_time = elf_runtime.process_start_time.unwrap();
assert_eq!(process_start_time, 101010101010);
assert!(elf_runtime.process_start_time_utc_estimate.is_some());
let process_start_time_utc_estimate =
elf_runtime.process_start_time_utc_estimate.as_ref().unwrap();
assert_eq!(process_start_time_utc_estimate, "Mon 12 Jul 2021 03:53:33 PM UTC");
assert!(execution.merkle_root.is_some());
let merkle_root = execution.merkle_root.as_ref().unwrap();
assert_eq!(merkle_root, "1234");
assert!(execution.outgoing_capabilities.is_some());
let outgoing_capabilities = execution.outgoing_capabilities.as_ref().unwrap();
assert_eq!(outgoing_capabilities.len(), 1);
let outgoing_capability = &outgoing_capabilities[0];
assert_eq!(outgoing_capability, "minfs");
assert!(component.resolved.is_none());
}
#[fuchsia_async::run_singlethreaded(test)]
async fn barebones_execution_cml() {
let test_dir = TempDir::new_in("/tmp").unwrap();
let root = test_dir.path();
// Create the following structure
// .
// |- children
// |- component_type
// |- url
// |- exec
// |- in
// |- out
fs::create_dir(root.join("children")).unwrap();
File::create(root.join("component_type")).unwrap().write_all("static".as_bytes()).unwrap();
File::create(root.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/stash#meta/stash.cm".as_bytes())
.unwrap();
fs::create_dir_all(root.join("exec/in")).unwrap();
fs::create_dir_all(root.join("exec/out")).unwrap();
let hub_dir = Directory::from_namespace(root.to_path_buf()).unwrap();
let components = find_components("stash".to_string(), hub_dir).await.unwrap();
assert_eq!(components.len(), 1);
let component = &components[0];
assert_eq!(component.url, "fuchsia-pkg://fuchsia.com/stash#meta/stash.cm");
assert!(component.execution.is_some());
let execution = component.execution.as_ref().unwrap();
assert!(execution.elf_runtime.is_none());
assert!(execution.merkle_root.is_none());
assert!(execution.outgoing_capabilities.is_some());
let outgoing_capabilities = execution.outgoing_capabilities.as_ref().unwrap();
assert!(outgoing_capabilities.is_empty());
assert!(component.resolved.is_none());
}
#[fuchsia_async::run_singlethreaded(test)]
async fn cmx() {
let test_dir = TempDir::new_in("/tmp").unwrap();
let root = test_dir.path();
// Create the following structure
// .
// |- children
// |- appmgr
// |- children
// |- component_type
// |- url
// |- exec
// |- in
// |- out
// |- hub
// |- r
// |- c
// |- sshd.cmx
// |- 9898
// |- job-id
// |- process-id
// |- url
// |- in
// |- pkg
// |- meta
// |- out
// |- dev
// |- component_type
// |- url
fs::create_dir(root.join("children")).unwrap();
File::create(root.join("component_type")).unwrap().write_all("static".as_bytes()).unwrap();
File::create(root.join("url"))
.unwrap()
.write_all("fuchsia-boot:///#meta/root.cm".as_bytes())
.unwrap();
{
let appmgr = root.join("children/appmgr");
fs::create_dir(&appmgr).unwrap();
fs::create_dir(appmgr.join("children")).unwrap();
File::create(appmgr.join("component_type"))
.unwrap()
.write_all("static".as_bytes())
.unwrap();
File::create(appmgr.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/appmgr#meta/appmgr.cm".as_bytes())
.unwrap();
fs::create_dir_all(appmgr.join("exec/in")).unwrap();
fs::create_dir_all(appmgr.join("exec/out/hub/r")).unwrap();
{
let sshd = appmgr.join("exec/out/hub/c/sshd.cmx/9898");
fs::create_dir_all(&sshd).unwrap();
fs::create_dir_all(sshd.join("in/pkg")).unwrap();
fs::create_dir_all(sshd.join("out/dev")).unwrap();
File::create(sshd.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/sshd#meta/sshd.cmx".as_bytes())
.unwrap();
File::create(sshd.join("in/pkg/meta"))
.unwrap()
.write_all("1234".as_bytes())
.unwrap();
File::create(sshd.join("job-id")).unwrap().write_all("5454".as_bytes()).unwrap();
File::create(sshd.join("process-id"))
.unwrap()
.write_all("9898".as_bytes())
.unwrap();
}
}
let hub_dir = Directory::from_namespace(root.to_path_buf()).unwrap();
let components = find_components("sshd".to_string(), hub_dir).await.unwrap();
assert_eq!(components.len(), 1);
let component = &components[0];
assert_eq!(component.moniker, vec!["appmgr", "sshd.cmx"].into());
assert_eq!(component.url, "fuchsia-pkg://fuchsia.com/sshd#meta/sshd.cmx");
assert_eq!(component.component_type, "CMX component");
assert!(component.resolved.is_some());
let resolved = component.resolved.as_ref().unwrap();
let incoming_capabilities = &resolved.incoming_capabilities;
assert_eq!(incoming_capabilities.len(), 1);
let instance_id = &resolved.instance_id;
assert!(instance_id.is_none());
let incoming_capability = &incoming_capabilities[0];
assert_eq!(incoming_capability, "pkg");
let exposed_capabilities = &resolved.exposed_capabilities;
assert!(exposed_capabilities.is_empty());
assert!(component.execution.is_some());
let execution = component.execution.as_ref().unwrap();
assert!(execution.elf_runtime.is_some());
let elf_runtime = execution.elf_runtime.as_ref().unwrap();
assert_eq!(elf_runtime.job_id, 5454);
let process_id = elf_runtime.process_id.unwrap();
assert_eq!(process_id, 9898);
assert!(elf_runtime.process_start_time.is_none());
assert!(elf_runtime.process_start_time_utc_estimate.is_none());
assert!(execution.merkle_root.is_some());
let merkle_root = execution.merkle_root.as_ref().unwrap();
assert_eq!(merkle_root, "1234");
assert!(execution.outgoing_capabilities.is_some());
let outgoing_capabilities = execution.outgoing_capabilities.as_ref().unwrap();
assert_eq!(outgoing_capabilities.len(), 1);
let outgoing_capability = &outgoing_capabilities[0];
assert_eq!(outgoing_capability, "dev");
}
#[fuchsia_async::run_singlethreaded(test)]
async fn multiple_cmx_different_process_ids() {
let test_dir = TempDir::new_in("/tmp").unwrap();
let root = test_dir.path();
// Create the following structure
// .
// |- children
// |- appmgr
// |- children
// |- component_type
// |- url
// |- exec
// |- in
// |- out
// |- hub
// |- r
// |- c
// |- sshd.cmx
// |- 8787
// |- job-id
// |- process-id
// |- url
// |- in
// |- out
// |- 9898
// |- job-id
// |- process-id
// |- url
// |- in
// |- out
// |- component_type
// |- url
fs::create_dir(root.join("children")).unwrap();
File::create(root.join("component_type")).unwrap().write_all("static".as_bytes()).unwrap();
File::create(root.join("url"))
.unwrap()
.write_all("fuchsia-boot:///#meta/root.cm".as_bytes())
.unwrap();
{
let appmgr = root.join("children/appmgr");
fs::create_dir(&appmgr).unwrap();
fs::create_dir(appmgr.join("children")).unwrap();
File::create(appmgr.join("component_type"))
.unwrap()
.write_all("static".as_bytes())
.unwrap();
File::create(appmgr.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/appmgr#meta/appmgr.cm".as_bytes())
.unwrap();
fs::create_dir_all(appmgr.join("exec/in")).unwrap();
fs::create_dir_all(appmgr.join("exec/out/hub/r")).unwrap();
{
let sshd_1 = appmgr.join("exec/out/hub/c/sshd.cmx/9898");
fs::create_dir_all(&sshd_1).unwrap();
fs::create_dir(sshd_1.join("in")).unwrap();
fs::create_dir(sshd_1.join("out")).unwrap();
File::create(sshd_1.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/sshd#meta/sshd.cmx".as_bytes())
.unwrap();
File::create(sshd_1.join("job-id")).unwrap().write_all("5454".as_bytes()).unwrap();
File::create(sshd_1.join("process-id"))
.unwrap()
.write_all("8787".as_bytes())
.unwrap();
}
{
let sshd_2 = appmgr.join("exec/out/hub/c/sshd.cmx/8787");
fs::create_dir_all(&sshd_2).unwrap();
fs::create_dir(sshd_2.join("in")).unwrap();
fs::create_dir(sshd_2.join("out")).unwrap();
File::create(sshd_2.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/sshd#meta/sshd.cmx".as_bytes())
.unwrap();
File::create(sshd_2.join("job-id")).unwrap().write_all("5454".as_bytes()).unwrap();
File::create(sshd_2.join("process-id"))
.unwrap()
.write_all("9898".as_bytes())
.unwrap();
}
}
let hub_dir = Directory::from_namespace(root.to_path_buf()).unwrap();
let components = find_components("sshd".to_string(), hub_dir).await.unwrap();
assert_eq!(components.len(), 2);
{
let component = &components[0];
assert_eq!(component.moniker, vec!["appmgr", "sshd.cmx"].into());
assert_eq!(component.url, "fuchsia-pkg://fuchsia.com/sshd#meta/sshd.cmx");
assert_eq!(component.component_type, "CMX component");
assert!(component.execution.is_some());
let execution = component.execution.as_ref().unwrap();
assert!(execution.elf_runtime.is_some());
let elf_runtime = execution.elf_runtime.as_ref().unwrap();
assert_eq!(elf_runtime.job_id, 5454);
let process_id = elf_runtime.process_id.unwrap();
assert_eq!(process_id, 9898);
}
{
let component = &components[1];
assert_eq!(component.moniker, vec!["appmgr", "sshd.cmx"].into());
assert_eq!(component.url, "fuchsia-pkg://fuchsia.com/sshd#meta/sshd.cmx");
assert_eq!(component.component_type, "CMX component");
assert!(component.execution.is_some());
let execution = component.execution.as_ref().unwrap();
assert!(execution.elf_runtime.is_some());
let elf_runtime = execution.elf_runtime.as_ref().unwrap();
assert_eq!(elf_runtime.job_id, 5454);
let process_id = elf_runtime.process_id.unwrap();
assert_eq!(process_id, 8787);
}
}
#[fuchsia_async::run_singlethreaded(test)]
async fn multiple_cmx_different_realms() {
let test_dir = TempDir::new_in("/tmp").unwrap();
let root = test_dir.path();
// Create the following structure
// .
// |- children
// |- appmgr
// |- children
// |- component_type
// |- url
// |- exec
// |- in
// |- out
// |- hub
// |- r
// |- sys
// |- 1765
// |- r
// |- c
// |- sshd.cmx
// |- 1765
// |- job-id
// |- url
// |- in
// |- out
// |- c
// |- sshd.cmx
// |- 5454
// |- job-id
// |- process-id
// |- url
// |- in
// |- out
// |- component_type
// |- url
fs::create_dir(root.join("children")).unwrap();
File::create(root.join("component_type")).unwrap().write_all("static".as_bytes()).unwrap();
File::create(root.join("url"))
.unwrap()
.write_all("fuchsia-boot:///#meta/root.cm".as_bytes())
.unwrap();
{
let appmgr = root.join("children/appmgr");
fs::create_dir(&appmgr).unwrap();
fs::create_dir(appmgr.join("children")).unwrap();
File::create(appmgr.join("component_type"))
.unwrap()
.write_all("static".as_bytes())
.unwrap();
File::create(appmgr.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/appmgr#meta/appmgr.cm".as_bytes())
.unwrap();
fs::create_dir_all(appmgr.join("exec/in")).unwrap();
fs::create_dir_all(appmgr.join("exec/out/hub/r")).unwrap();
fs::create_dir_all(appmgr.join("exec/out/hub/r/sys/1765/r")).unwrap();
{
let sshd_1 = appmgr.join("exec/out/hub/c/sshd.cmx/5454");
fs::create_dir_all(&sshd_1).unwrap();
fs::create_dir(sshd_1.join("in")).unwrap();
fs::create_dir(sshd_1.join("out")).unwrap();
File::create(sshd_1.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/sshd#meta/sshd.cmx".as_bytes())
.unwrap();
File::create(sshd_1.join("job-id")).unwrap().write_all("5454".as_bytes()).unwrap();
File::create(sshd_1.join("process-id"))
.unwrap()
.write_all("8787".as_bytes())
.unwrap();
}
{
let sshd_2 = appmgr.join("exec/out/hub/r/sys/1765/c/sshd.cmx/1765");
fs::create_dir_all(&sshd_2).unwrap();
fs::create_dir(sshd_2.join("in")).unwrap();
fs::create_dir(sshd_2.join("out")).unwrap();
File::create(sshd_2.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/sshd#meta/sshd.cmx".as_bytes())
.unwrap();
File::create(sshd_2.join("job-id")).unwrap().write_all("1765".as_bytes()).unwrap();
}
}
let hub_dir = Directory::from_namespace(root.to_path_buf()).unwrap();
let components = find_components("sshd".to_string(), hub_dir).await.unwrap();
assert_eq!(components.len(), 2);
{
let component = &components[0];
assert_eq!(component.moniker, vec!["appmgr", "sshd.cmx"].into());
assert_eq!(component.url, "fuchsia-pkg://fuchsia.com/sshd#meta/sshd.cmx");
assert_eq!(component.component_type, "CMX component");
assert!(component.execution.is_some());
let execution = component.execution.as_ref().unwrap();
assert!(execution.elf_runtime.is_some());
let elf_runtime = execution.elf_runtime.as_ref().unwrap();
assert_eq!(elf_runtime.job_id, 5454);
let process_id = elf_runtime.process_id.unwrap();
assert_eq!(process_id, 8787);
}
{
let component = &components[1];
assert_eq!(component.moniker, vec!["appmgr", "sys", "sshd.cmx"].into());
assert_eq!(component.url, "fuchsia-pkg://fuchsia.com/sshd#meta/sshd.cmx");
assert_eq!(component.component_type, "CMX component");
assert!(component.execution.is_some());
let execution = component.execution.as_ref().unwrap();
assert!(execution.elf_runtime.is_some());
let elf_runtime = execution.elf_runtime.as_ref().unwrap();
assert_eq!(elf_runtime.job_id, 1765);
assert!(elf_runtime.process_id.is_none());
}
}
#[fuchsia_async::run_singlethreaded(test)]
async fn runner_cmx() {
let test_dir = TempDir::new_in("/tmp").unwrap();
let root = test_dir.path();
// Create the following structure
// .
// |- children
// |- appmgr
// |- children
// |- component_type
// |- url
// |- exec
// |- in
// |- out
// |- hub
// |- c
// |- sshd.cmx
// |- 5454
// |- job-id
// |- process-id
// |- url
// |- in
// |- out
// |- c
// |- foo.cmx
// |- 1234
// |- job-id
// |- process-id
// |- url
// |- in
// |- out
// |- component_type
// |- url
fs::create_dir(root.join("children")).unwrap();
File::create(root.join("component_type")).unwrap().write_all("static".as_bytes()).unwrap();
File::create(root.join("url"))
.unwrap()
.write_all("fuchsia-boot:///#meta/root.cm".as_bytes())
.unwrap();
{
let appmgr = root.join("children/appmgr");
fs::create_dir(&appmgr).unwrap();
fs::create_dir(appmgr.join("children")).unwrap();
File::create(appmgr.join("component_type"))
.unwrap()
.write_all("static".as_bytes())
.unwrap();
File::create(appmgr.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/appmgr#meta/appmgr.cm".as_bytes())
.unwrap();
fs::create_dir_all(appmgr.join("exec/in")).unwrap();
fs::create_dir_all(appmgr.join("exec/out/hub/r")).unwrap();
{
let sshd = appmgr.join("exec/out/hub/c/sshd.cmx/5454");
fs::create_dir_all(&sshd).unwrap();
fs::create_dir(sshd.join("in")).unwrap();
fs::create_dir(sshd.join("out")).unwrap();
File::create(sshd.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/sshd#meta/sshd.cmx".as_bytes())
.unwrap();
File::create(sshd.join("job-id")).unwrap().write_all("5454".as_bytes()).unwrap();
File::create(sshd.join("process-id"))
.unwrap()
.write_all("8787".as_bytes())
.unwrap();
{
let foo = sshd.join("c/foo.cmx/1234");
fs::create_dir_all(&foo).unwrap();
fs::create_dir(foo.join("in")).unwrap();
fs::create_dir(foo.join("out")).unwrap();
File::create(foo.join("url"))
.unwrap()
.write_all("fuchsia-pkg://fuchsia.com/foo#meta/foo.cmx".as_bytes())
.unwrap();
File::create(foo.join("job-id")).unwrap().write_all("1234".as_bytes()).unwrap();
File::create(foo.join("process-id"))
.unwrap()
.write_all("4536".as_bytes())
.unwrap();
}
}
}
let hub_dir = Directory::from_namespace(root.to_path_buf()).unwrap();
let components = find_components("foo.cmx".to_string(), hub_dir).await.unwrap();
assert_eq!(components.len(), 1);
{
let component = &components[0];
assert_eq!(component.moniker, vec!["appmgr", "sshd.cmx", "foo.cmx"].into());
assert_eq!(component.url, "fuchsia-pkg://fuchsia.com/foo#meta/foo.cmx");
assert_eq!(component.component_type, "CMX component");
assert!(component.execution.is_some());
let execution = component.execution.as_ref().unwrap();
assert!(execution.elf_runtime.is_some());
let elf_runtime = execution.elf_runtime.as_ref().unwrap();
assert_eq!(elf_runtime.job_id, 1234);
let process_id = elf_runtime.process_id.unwrap();
assert_eq!(process_id, 4536);
}
}
}
| 38.131048 | 103 | 0.539153 |
fb83a916367792d895b587f71863a4ecce8097c2 | 13,071 | //! A flat representation of memory provided by lifters, typically used in a
//! read-only fashion
//!
//! This memory model implements the `TranslationMemory` trait, allowing lifters
//! to use it to lift instructions.
use crate::architecture::Endian;
use crate::error::*;
use crate::executor;
use crate::il;
use crate::memory::MemoryPermissions;
use crate::translator::TranslationMemory;
use std::collections::BTreeMap;
use std::ops::Bound::Included;
/// A section of backed memory. Essentially a vector of type `u8` with
/// permissions.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub struct Section {
data: Vec<u8>,
permissions: MemoryPermissions,
}
impl Section {
/// Create a new memory section.
pub fn new(data: Vec<u8>, permissions: MemoryPermissions) -> Section {
Section { data, permissions }
}
/// Get this memory section's data.
pub fn data(&self) -> &[u8] {
&self.data
}
/// Get the length of this memory section.
pub fn len(&self) -> usize {
self.data.len()
}
/// Return `true` if the data field is empty, `false` otherwise.
pub fn is_empty(&self) -> bool {
self.data.is_empty()
}
/// Get the permissions of this memory section.
pub fn permissions(&self) -> MemoryPermissions {
self.permissions
}
/// Truncate the data of this memory section.
fn truncate(&mut self, size: usize) {
self.data.truncate(size);
}
}
/// A simple memory model, containing permissioned sections of type `u8`.
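///
/// # Example
///
/// A minimal usage sketch (not compiled here; assumes `MemoryPermissions`
/// exposes a `READ` flag):
///
/// ```ignore
/// let mut memory = Memory::new(Endian::Little);
/// memory.set_memory(0x1000, vec![0u8; 4], MemoryPermissions::READ);
/// memory.set32(0x1000, 0xdeadbeef).unwrap();
/// assert_eq!(memory.get32(0x1000), Some(0xdeadbeef));
/// ```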
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub struct Memory {
endian: Endian,
sections: BTreeMap<u64, Section>,
}
impl Memory {
/// Create a new backed memory module with the given endianness.
pub fn new(endian: Endian) -> Memory {
Memory {
endian,
sections: BTreeMap::new(),
}
}
/// Get the sections in this memory module.
pub fn sections(&self) -> &BTreeMap<u64, Section> {
&self.sections
}
/// Get the permissions at the given address.
pub fn permissions(&self, address: u64) -> Option<MemoryPermissions> {
self.section_address(address).map(|section_address| {
self.sections
                .get(&section_address)
.unwrap_or_else(|| {
panic!(
"Failed to get section at 0x{:x} in \
backing::Memory::permissions()",
section_address
)
})
.permissions()
})
}
/// Get the `u8` value at the given address.
pub fn get8(&self, address: u64) -> Option<u8> {
self.section_address_offset(address)
.map(|(address, offset)| {
*self
.sections
.get(&address)
.unwrap_or_else(|| {
panic!(
"Failed to get section at 0x{:x} in \
backing::Memory::permissions()",
address
)
})
.data()
.get(offset)
.unwrap_or_else(|| {
panic!(
"Failed to get offset 0x{:x} from 0x{:x} in \
backing::Memory::permissions()",
offset, address
)
})
})
}
/// Set the 32-bit value at the given address, allowing the memory model
/// to account for the underlying endianness.
pub fn set32(&mut self, address: u64, value: u32) -> Result<()> {
let (section_address, offset) = self
.section_address_offset(address)
.unwrap_or_else(|| panic!("Address 0x{:x} has no section", address));
        let section = self.sections.get_mut(&section_address).unwrap();
if offset + 4 > section.len() {
bail!(format!(
"Section at 0x{:x} is of size {}, and not big \
enough to hold 32-bit value",
section_address,
section.len()
));
}
match self.endian {
Endian::Big => {
*section.data.get_mut(offset).unwrap() = (value >> 24) as u8;
*section.data.get_mut(offset + 1).unwrap() = (value >> 16) as u8;
*section.data.get_mut(offset + 2).unwrap() = (value >> 8) as u8;
*section.data.get_mut(offset + 3).unwrap() = (value) as u8;
}
Endian::Little => {
*section.data.get_mut(offset).unwrap() = (value) as u8;
*section.data.get_mut(offset + 1).unwrap() = (value >> 8) as u8;
*section.data.get_mut(offset + 2).unwrap() = (value >> 16) as u8;
*section.data.get_mut(offset + 3).unwrap() = (value >> 24) as u8;
}
}
Ok(())
}
/// Get the 32-bit value at the given address, allowing the memory model to
/// account for the underlying endianness.
pub fn get32(&self, address: u64) -> Option<u32> {
let (section_address, offset) = match self.section_address_offset(address) {
Some((section_address, offset)) => (section_address, offset),
None => return None,
};
        let section = self.sections.get(&section_address).unwrap();
if offset + 4 > section.len() {
return None;
}
Some(match self.endian {
Endian::Big => {
(section.data[offset] as u32) << 24
| (section.data[offset + 1] as u32) << 16
| (section.data[offset + 2] as u32) << 8
| (section.data[offset + 3] as u32)
}
Endian::Little => {
(section.data[offset] as u32)
| (section.data[offset + 1] as u32) << 8
| (section.data[offset + 2] as u32) << 16
| (section.data[offset + 3] as u32) << 24
}
})
}
/// Get a constant value up to a certain number of bits
pub fn get(&self, address: u64, bits: usize) -> Option<il::Constant> {
if bits % 8 > 0 || bits == 0 {
return None;
}
let mut value = il::expr_const(self.get8(address)? as u64, bits);
match self.endian {
Endian::Big => {
for i in 1..(bits / 8) {
value = il::Expression::or(
il::Expression::shl(value, il::expr_const(8, bits)).unwrap(),
il::expr_const(self.get8(address + i as u64).unwrap() as u64, bits),
)
.unwrap();
}
Some(executor::eval(&value).unwrap())
}
Endian::Little => {
for i in 1..(bits / 8) {
value = il::Expression::or(
il::Expression::shl(
il::expr_const(self.get8(address + i as u64).unwrap() as u64, bits),
il::expr_const((i * 8) as u64, bits),
)
.unwrap(),
value,
)
.unwrap();
}
Some(executor::eval(&value).unwrap())
}
}
}
/// Set the memory at the given address, and give that memory the given
/// permissions.
///
/// This takes care of the underlying memory sections automatically.
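    ///
    /// Concretely, an existing section that overlaps the front of the new
    /// data is truncated, one that extends past its end is split so the
    /// tail survives as its own section, and any section fully covered by
    /// the new data is removed. A sketch (not compiled here):
    ///
    /// ```ignore
    /// memory.set_memory(0x1000, vec![0u8; 8], MemoryPermissions::READ);
    /// memory.set_memory(0x1004, vec![1u8; 8], MemoryPermissions::WRITE);
    /// // [0x1000..0x1004) keeps READ; [0x1004..0x100c) is the new WRITE data.
    /// assert_eq!(memory.permissions(0x1003), Some(MemoryPermissions::READ));
    /// assert_eq!(memory.permissions(0x1004), Some(MemoryPermissions::WRITE));
    /// ```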
pub fn set_memory(&mut self, address: u64, data: Vec<u8>, permissions: MemoryPermissions) {
// All overlapping memory sections need to be adjusted
// Start by collecting addresses and lengths
let als = self
.sections
.iter()
.map(|(address, section)| (*address, section.len()))
.collect::<Vec<(u64, usize)>>();
// Adjust overlapping memory sections
for al in als {
let (a, l) = (al.0, al.1 as u64);
if a < address && a + l > address {
if a + l <= address + data.len() as u64 {
let new_length = (address - a) as usize;
self.sections
.get_mut(&a)
.unwrap_or_else(|| {
panic!(
"Failed to get section 0x{:x} in \
backing::Memory::set_memory(). This should never \
happen.",
a
)
})
.truncate(new_length);
} else {
let offset = address + data.len() as u64 - a;
let split = self
.sections
.get_mut(&a)
.unwrap_or_else(|| {
panic!(
"Failed to get section 0x{:x} in \
backing::Memory::set_memory(). This should \
never happen.",
a
)
})
.data
.split_off(offset as usize);
let permissions = self
.sections
.get(&a)
.unwrap_or_else(|| {
panic!(
"Failed to get section 0x{:x} in \
backing::Memory::set_memory(). This should \
never happen.",
a
)
})
.permissions();
self.sections.insert(
address + data.len() as u64,
Section::new(split, permissions),
);
let new_length = (address - a) as usize;
self.sections.get_mut(&a).unwrap().truncate(new_length);
}
} else if a >= address && a + l <= address + data.len() as u64 {
if self.sections.get(&a).is_none() {
panic!(
"About to remove 0x{:x} from sections in \
                    backing::Memory::set_memory(), but address does not \
exist",
a
);
}
self.sections.remove(&a);
} else if a >= address
&& a < address + data.len() as u64
&& a + l > address + data.len() as u64
{
let offset = address + data.len() as u64 - a;
let data_len = self.sections.get(&a).unwrap().data.len() as u64;
if offset > data_len {
panic!("offset 0x{:x} is > data.len() 0x{:x}", offset, data_len);
}
let split = self
.sections
.get_mut(&a)
.unwrap()
.data
.split_off(offset as usize);
let permissions = self
.sections
.get(&a)
.unwrap_or_else(|| {
panic!(
"Failed to get section for 0x{:x} while updating \
permissions in backing::Memory::set_memory(). \
This should never happen.",
a
)
})
.permissions();
self.sections.remove(&a);
self.sections.insert(
address + data.len() as u64,
Section::new(split, permissions),
);
}
}
self.sections
.insert(address, Section::new(data, permissions));
}
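    /// Find the start address of the section containing `address`, if any:
    /// take the greatest section start at or below `address` and check that
    /// that section extends far enough to cover it.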
fn section_address(&self, address: u64) -> Option<u64> {
let mut sections = self.sections.range((Included(0), Included(address)));
if let Some((section_address, section)) = sections.next_back() {
if *section_address <= address && *section_address + section.len() as u64 > address {
return Some(*section_address);
}
}
None
}
fn section_address_offset(&self, address: u64) -> Option<(u64, usize)> {
self.section_address(address)
.map(|section_address| (section_address, (address - section_address) as usize))
}
}
impl TranslationMemory for Memory {
fn get_u8(&self, address: u64) -> Option<u8> {
self.get8(address)
}
fn permissions(&self, address: u64) -> Option<MemoryPermissions> {
self.permissions(address)
}
}
| 36.308333 | 97 | 0.4549 |
8f157042a12ff3d4cd3ada3a005a04bc246f36a1 | 281 | #![doc(html_favicon_url = "https://www.ruma.io/favicon.ico")]
#![doc(html_logo_url = "https://www.ruma.io/images/logo.png")]
//! (De)serializable types for the Matrix Identity Service API.
#![warn(missing_docs)]
pub mod authentication;
pub mod keys;
pub mod status;
pub mod tos;
| 25.545455 | 63 | 0.718861 |
72682a63c15b9acc87a3344044d08a127d31a3ca | 3,737 | //! Object representation for BitmapData
use crate::avm2::activation::Activation;
use crate::avm2::names::{Namespace, QName};
use crate::avm2::object::script_object::ScriptObjectData;
use crate::avm2::object::{ClassObject, Object, ObjectPtr, TObject};
use crate::avm2::value::Value;
use crate::avm2::Error;
use crate::bitmap::bitmap_data::BitmapData;
use gc_arena::{Collect, GcCell, MutationContext};
use std::cell::{Ref, RefMut};
/// A class instance allocator that allocates BitmapData objects.
pub fn bitmapdata_allocator<'gc>(
class: ClassObject<'gc>,
proto: Object<'gc>,
activation: &mut Activation<'_, 'gc, '_>,
) -> Result<Object<'gc>, Error> {
let base = ScriptObjectData::base_new(Some(proto), Some(class));
Ok(BitmapDataObject(GcCell::allocate(
activation.context.gc_context,
BitmapDataObjectData {
base,
bitmap_data: None,
},
))
.into())
}
#[derive(Clone, Collect, Debug, Copy)]
#[collect(no_drop)]
pub struct BitmapDataObject<'gc>(GcCell<'gc, BitmapDataObjectData<'gc>>);
#[derive(Clone, Collect, Debug)]
#[collect(no_drop)]
pub struct BitmapDataObjectData<'gc> {
/// Base script object
base: ScriptObjectData<'gc>,
bitmap_data: Option<GcCell<'gc, BitmapData<'gc>>>,
}
impl<'gc> BitmapDataObject<'gc> {
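    /// Wrap an existing `BitmapData` cell in a new object of the given
    /// class: look up the class prototype, allocate the instance, point the
    /// bitmap data back at the new object, then install instance traits and
    /// run the native initializer.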
pub fn from_bitmap_data(
activation: &mut Activation<'_, 'gc, '_>,
bitmap_data: GcCell<'gc, BitmapData<'gc>>,
class: ClassObject<'gc>,
) -> Result<Object<'gc>, Error> {
let proto = class
.get_property(
class.into(),
&QName::new(Namespace::public(), "prototype").into(),
activation,
)?
.coerce_to_object(activation)?;
let mut instance = Self(GcCell::allocate(
activation.context.gc_context,
BitmapDataObjectData {
base: ScriptObjectData::base_new(Some(proto), Some(class)),
bitmap_data: Some(bitmap_data),
},
));
bitmap_data
.write(activation.context.gc_context)
.init_object2(instance.into());
instance.install_instance_traits(activation, class)?;
class.call_native_init(Some(instance.into()), &[], activation, Some(class))?;
Ok(instance.into())
}
}
impl<'gc> TObject<'gc> for BitmapDataObject<'gc> {
fn base(&self) -> Ref<ScriptObjectData<'gc>> {
Ref::map(self.0.read(), |read| &read.base)
}
fn base_mut(&self, mc: MutationContext<'gc, '_>) -> RefMut<ScriptObjectData<'gc>> {
RefMut::map(self.0.write(mc), |write| &mut write.base)
}
fn as_ptr(&self) -> *const ObjectPtr {
self.0.as_ptr() as *const ObjectPtr
}
fn derive(&self, activation: &mut Activation<'_, 'gc, '_>) -> Result<Object<'gc>, Error> {
let base = ScriptObjectData::base_new(Some((*self).into()), None);
Ok(BitmapDataObject(GcCell::allocate(
activation.context.gc_context,
BitmapDataObjectData {
base,
bitmap_data: None,
},
))
.into())
}
fn value_of(&self, _mc: MutationContext<'gc, '_>) -> Result<Value<'gc>, Error> {
Ok(Value::Object(Object::from(*self)))
}
/// Unwrap this object's bitmap data
fn as_bitmap_data(&self) -> Option<GcCell<'gc, BitmapData<'gc>>> {
self.0.read().bitmap_data
}
/// Initialize the bitmap data in this object, if it's capable of
/// supporting said data
fn init_bitmap_data(
&self,
mc: MutationContext<'gc, '_>,
new_bitmap: GcCell<'gc, BitmapData<'gc>>,
) {
self.0.write(mc).bitmap_data = Some(new_bitmap)
}
}
| 30.884298 | 94 | 0.603158 |
0333028bb2f0835b7e5d0ca8de9ee4e5b1fa63e1 | 6,449 | use crate::processor::{ProcessValue, ProcessingState, Processor};
use crate::protocol::{Breadcrumb, Event};
use crate::types::{Annotated, ErrorKind, Meta, Object, ProcessingResult, Value};
/// Replace remaining values and all existing meta with errors.
fn create_errors(other: &mut Object<Value>) {
for value in other.values_mut() {
*value = Annotated::from_error(ErrorKind::InvalidAttribute, None);
}
}
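/// Removes unknown attributes from the event payload: nested `other` maps
/// are cleared outright (unless the field is marked `retain`), while the
/// top-level event and breadcrumb extras are replaced with
/// `InvalidAttribute` errors.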
pub struct RemoveOtherProcessor;
impl Processor for RemoveOtherProcessor {
fn process_other(
&mut self,
other: &mut Object<Value>,
state: &ProcessingState<'_>,
) -> ProcessingResult {
// Drop unknown attributes at all levels without error messages, unless `retain = "true"`
// was specified explicitly on the field.
if !state.attrs().retain {
other.clear();
}
Ok(())
}
fn process_breadcrumb(
&mut self,
breadcrumb: &mut Breadcrumb,
_meta: &mut Meta,
state: &ProcessingState<'_>,
) -> ProcessingResult {
// Move the current map out so we don't clear it in `process_other`
let mut other = std::mem::take(&mut breadcrumb.other);
create_errors(&mut other);
        // Recursively clean all `other`s now. Note that this won't touch the breadcrumb's other
breadcrumb.process_child_values(self, state)?;
breadcrumb.other = other;
Ok(())
}
fn process_event(
&mut self,
event: &mut Event,
_meta: &mut Meta,
state: &ProcessingState<'_>,
) -> ProcessingResult {
// Move the current map out so we don't clear it in `process_other`
let mut other = std::mem::take(&mut event.other);
// Drop Sentry internal attributes
other.remove("metadata");
other.remove("hashes");
// Drop known legacy attributes at top-level without errors
other.remove("applecrashreport");
other.remove("device");
other.remove("repos");
other.remove("query");
        // Replace remaining values and all existing meta with errors
create_errors(&mut other);
// Recursively clean all `other`s now. Note that this won't touch the event's other
event.process_child_values(self, state)?;
event.other = other;
Ok(())
}
}
#[cfg(test)]
use {crate::processor::process_value, crate::protocol::ContextInner};
#[test]
fn test_remove_legacy_attributes() {
let mut event = Annotated::new(Event {
other: {
let mut other = Object::new();
other.insert("applecrashreport".to_string(), Value::U64(42).into());
other.insert("device".to_string(), Value::U64(42).into());
other.insert("repos".to_string(), Value::U64(42).into());
other.insert("query".to_string(), Value::U64(42).into());
other
},
..Default::default()
});
process_value(
&mut event,
&mut RemoveOtherProcessor,
ProcessingState::root(),
)
.unwrap();
assert!(event.value().unwrap().other.is_empty());
}
#[test]
fn test_remove_unknown_attributes() {
let mut event = Annotated::new(Event {
other: {
let mut other = Object::new();
other.insert("foo".to_string(), Value::U64(42).into());
other.insert("bar".to_string(), Value::U64(42).into());
other
},
..Default::default()
});
process_value(
&mut event,
&mut RemoveOtherProcessor,
ProcessingState::root(),
)
.unwrap();
let other = &event.value().unwrap().other;
assert_eq_dbg!(
*other.get("foo").unwrap(),
Annotated::from_error(ErrorKind::InvalidAttribute, None)
);
assert_eq_dbg!(
*other.get("bar").unwrap(),
Annotated::from_error(ErrorKind::InvalidAttribute, None)
);
}
#[test]
fn test_remove_nested_other() {
use crate::protocol::User;
let mut event = Annotated::new(Event {
user: Annotated::from(User {
other: {
let mut other = Object::new();
other.insert("foo".to_string(), Value::U64(42).into());
other.insert("bar".to_string(), Value::U64(42).into());
other
},
..Default::default()
}),
..Default::default()
});
process_value(
&mut event,
&mut RemoveOtherProcessor,
ProcessingState::root(),
)
.unwrap();
assert!(get_value!(event.user!).other.is_empty());
}
#[test]
fn test_retain_context_other() {
use crate::protocol::{Context, Contexts, OsContext};
let mut os = OsContext::default();
os.other
.insert("foo".to_string(), Annotated::from(Value::U64(42)));
let mut contexts = Object::new();
contexts.insert(
"renamed".to_string(),
Annotated::from(ContextInner(Context::Os(Box::new(os)))),
);
let mut event = Annotated::new(Event {
contexts: Annotated::from(Contexts(contexts.clone())),
..Default::default()
});
process_value(
&mut event,
&mut RemoveOtherProcessor,
ProcessingState::root(),
)
.unwrap();
assert_eq_dbg!(get_value!(event.contexts!).0, contexts);
}
#[test]
fn test_breadcrumb_errors() {
use crate::protocol::Values;
let mut event = Annotated::new(Event {
breadcrumbs: Annotated::new(Values::new(vec![Annotated::new(Breadcrumb {
other: {
let mut other = Object::new();
other.insert("foo".to_string(), Value::U64(42).into());
other.insert("bar".to_string(), Value::U64(42).into());
other
},
..Breadcrumb::default()
})])),
..Default::default()
});
process_value(
&mut event,
&mut RemoveOtherProcessor,
ProcessingState::root(),
)
.unwrap();
let other = &event
.value()
.unwrap()
.breadcrumbs
.value()
.unwrap()
.values
.value()
.unwrap()[0]
.value()
.unwrap()
.other;
assert_eq_dbg!(
*other.get("foo").unwrap(),
Annotated::from_error(ErrorKind::InvalidAttribute, None)
);
assert_eq_dbg!(
*other.get("bar").unwrap(),
Annotated::from_error(ErrorKind::InvalidAttribute, None)
);
}
| 27.559829 | 97 | 0.571096 |
87de9ae821655dd4474600cdae477ad03276f031 | 14,574 | // Copyright (c) 2020 zenoxygen
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
extern crate anyhow;
extern crate url;
use crate::handshake::*;
use crate::message::*;
use crate::peer::*;
use crate::piece::*;
use anyhow::{anyhow, Result};
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use std::io::{Cursor, Read, Write};
use std::net::{IpAddr, SocketAddr, TcpStream};
use std::time::Duration;
/// Client structure.
pub struct Client {
// A peer
peer: Peer,
// Torrent peer id
peer_id: Vec<u8>,
// Torrent info hash
info_hash: Vec<u8>,
// Connection to peer
conn: TcpStream,
// Bitfield of pieces
bitfield: Vec<u8>,
// Peer has choked this client
choked: bool,
}
impl Client {
/// Build a new client.
///
/// # Arguments
///
/// * `peer_id` - Urlencoded 20-byte string used as a unique ID for the client.
/// * `info_hash` - 20-byte SHA-1 hash of the info key in the metainfo file.
///
pub fn new(peer: Peer, peer_id: Vec<u8>, info_hash: Vec<u8>) -> Result<Client> {
// Open connection with remote peer
let peer_socket = SocketAddr::new(IpAddr::V4(peer.ip), peer.port);
let conn = match TcpStream::connect_timeout(&peer_socket, Duration::from_secs(15)) {
Ok(conn) => conn,
Err(_) => return Err(anyhow!("could not connect to peer")),
};
info!("Connected to peer {:?}", peer.id);
// Return new client
let client = Client {
peer,
peer_id,
info_hash,
conn,
bitfield: vec![],
choked: true,
};
Ok(client)
}
    /// Return whether the peer has choked this client.
pub fn is_choked(&self) -> bool {
self.choked
}
/// Check if peer has a piece.
///
/// # Arguments
///
/// * `index` - The piece index to check.
///
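    /// Each byte of the bitfield covers eight pieces, most significant bit
    /// first: piece 0 is bit 7 of byte 0, piece 8 is bit 7 of byte 1, and
    /// so on. A worked sketch (not compiled here):
    ///
    /// ```ignore
    /// // bitfield = [0b1010_0000]: pieces 0 and 2 are available.
    /// assert!(client.has_piece(0));
    /// assert!(!client.has_piece(1));
    /// assert!(client.has_piece(2));
    /// ```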
pub fn has_piece(&self, index: u32) -> bool {
let byte_index = index / 8;
let offset = index % 8;
// Prevent unbounded values
if byte_index < self.bitfield.len() as u32 {
// Check for piece index into bitfield
return self.bitfield[byte_index as usize] >> (7 - offset) as u8 & 1 != 0;
}
false
}
    /// Mark a piece as one the peer has.
///
/// # Arguments
///
/// * `index` - The piece index to update into bitfield.
///
pub fn set_piece(&mut self, index: u32) {
let byte_index = index / 8;
let offset = index % 8;
// Create a new bitfield
let mut bitfield: Vec<u8> = self.bitfield.to_vec();
// Prevent unbounded values
if byte_index < self.bitfield.len() as u32 {
// Set piece index into bitfield
bitfield[byte_index as usize] |= (1 << (7 - offset)) as u8;
self.bitfield = bitfield;
}
}
/// Set connection timeout.
///
/// # Arguments
///
/// * `secs` - The timeout in seconds.
///
pub fn set_connection_timeout(&self, secs: u64) -> Result<()> {
// Set write timeout
if self
.conn
.set_write_timeout(Some(Duration::from_secs(secs)))
.is_err()
{
return Err(anyhow!("could not set write timeout"));
}
// Set read timeout
if self
.conn
.set_read_timeout(Some(Duration::from_secs(secs)))
.is_err()
{
return Err(anyhow!("could not set read timeout"));
}
Ok(())
}
/// Handshake with remote peer.
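    ///
    /// On the wire, a handshake is laid out as
    /// `<pstrlen><pstr><reserved><info_hash><peer_id>`: a one-byte length
    /// for the protocol string, eight reserved bytes, then the 20-byte info
    /// hash and 20-byte peer id. The connection is only kept if the peer
    /// echoes back the same info hash.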
pub fn handshake_with_peer(&mut self) -> Result<()> {
// Create handshake
let peer_id = self.peer_id.clone();
let info_hash = self.info_hash.clone();
let handshake = Handshake::new(peer_id, info_hash);
// Send handshake to remote peer
let handshake_encoded: Vec<u8> = handshake.serialize()?;
        if self.conn.write_all(&handshake_encoded).is_err() {
return Err(anyhow!("could not send handshake to peer"));
}
// Read handshake received from remote peer
let handshake_len: usize = self.read_handshake_len()?;
let mut handshake_buf: Vec<u8> = vec![0; 48 + handshake_len];
if self.conn.read_exact(&mut handshake_buf).is_err() {
return Err(anyhow!("could not read handshake received from peer"));
}
// Check info hash received from remote peer
let handshake_decoded: Handshake = deserialize_handshake(&handshake_buf, handshake_len)?;
if handshake_decoded.info_hash != self.info_hash {
return Err(anyhow!("invalid handshake received from peer"));
}
Ok(())
}
/// Read handshake length.
fn read_handshake_len(&mut self) -> Result<usize> {
// Read 1 byte into buffer
let mut buf = [0; 1];
if self.conn.read_exact(&mut buf).is_err() {
return Err(anyhow!(
"could not read handshake length received from peer"
));
}
// Get handshake length
let handshake_len = buf[0];
if handshake_len == 0 {
return Err(anyhow!("invalid handshake length received from peer"));
}
Ok(handshake_len as usize)
}
/// Read message from remote peer.
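    ///
    /// Peer messages are length-prefixed as
    /// `<length: u32 big-endian><id: u8><payload>`; a length of zero is a
    /// keep-alive and carries neither id nor payload.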
pub fn read_message(&mut self) -> Result<Message> {
let message_len: usize = self.read_message_len()?;
// If message length is 0, it's a keep-alive
if message_len == 0 {
info!("Receive KEEP_ALIVE from peer {:?}", self.peer.id);
return Err(anyhow!("keep-alive"));
}
// Read message
let mut message_buf: Vec<u8> = vec![0; message_len];
if self.conn.read_exact(&mut message_buf).is_err() {
return Err(anyhow!("could not read message received from peer"));
}
// Deserialize message
let message: Message = deserialize_message(&message_buf, message_len)?;
Ok(message)
}
/// Read message length.
fn read_message_len(&mut self) -> Result<usize> {
// Read bytes into buffer
let mut buf = vec![0; 4];
if self.conn.read_exact(&mut buf).is_err() {
return Err(anyhow!("could not read message length received from peer"));
}
// Get message length
let mut cursor = Cursor::new(buf);
let message_len = cursor.read_u32::<BigEndian>()?;
Ok(message_len as usize)
}
/// Read CHOKE message from remote peer.
pub fn read_choke(&mut self) {
info!("Receive MESSAGE_CHOKE from peer {:?}", self.peer.id);
self.choked = true
}
/// Send UNCHOKE message to remote peer.
pub fn send_unchoke(&mut self) -> Result<()> {
let message: Message = Message::new(MESSAGE_UNCHOKE);
let message_encoded = message.serialize()?;
info!("Send MESSAGE_UNCHOKE to peer {:?}", self.peer.id);
        if self.conn.write_all(&message_encoded).is_err() {
return Err(anyhow!("could not send MESSAGE_UNCHOKE to peer"));
}
Ok(())
}
/// Read UNCHOKE message from remote peer.
pub fn read_unchoke(&mut self) {
info!("Receive MESSAGE_UNCHOKE from peer {:?}", self.peer.id);
self.choked = false
}
/// Send INTERESTED message to remote peer.
pub fn send_interested(&mut self) -> Result<()> {
let message: Message = Message::new(MESSAGE_INTERESTED);
let message_encoded = message.serialize()?;
info!("Send MESSAGE_INTERESTED to peer {:?}", self.peer.id);
        if self.conn.write_all(&message_encoded).is_err() {
return Err(anyhow!("could not send MESSAGE_INTERESTED to peer"));
}
Ok(())
}
/// Send HAVE message to remote peer.
///
/// # Arguments
///
/// * `index` - The index of a piece that has just been successfully downloaded and verified.
///
pub fn send_have(&mut self, index: u32) -> Result<()> {
let mut payload: Vec<u8> = vec![];
payload.write_u32::<BigEndian>(index)?;
let message: Message = Message::new_with_payload(MESSAGE_HAVE, payload);
let message_encoded = message.serialize()?;
info!("Send MESSAGE_HAVE to peer {:?}", self.peer.id);
        if self.conn.write_all(&message_encoded).is_err() {
return Err(anyhow!("could not send MESSAGE_HAVE to peer"));
}
Ok(())
}
/// Read HAVE message from remote peer.
///
/// The message payload is the zero-based index of a piece that has just been successfully downloaded and verified via the hash.
///
/// # Arguments
///
/// * `message` - The message to parse.
///
pub fn read_have(&mut self, message: Message) -> Result<()> {
info!("Receive MESSAGE_HAVE from peer {:?}", self.peer.id);
// Check if message id and payload are valid
if message.id != MESSAGE_HAVE || message.payload.to_vec().len() != 4 {
return Err(anyhow!("received invalid MESSAGE_HAVE from peer"));
}
// Get piece index
let mut payload_cursor = Cursor::new(message.payload.to_vec());
let index = payload_cursor.read_u32::<BigEndian>()?;
// Update bitfield
self.set_piece(index);
Ok(())
}
/// Read BITFIELD message from remote peer.
///
/// The message payload is a bitfield representing the pieces that have been successfully downloaded.
/// The high bit in the first byte corresponds to piece index 0.
/// Bits that are cleared indicated a missing piece, and set bits indicate a valid and available piece.
/// Spare bits at the end are set to zero.
///
pub fn read_bitfield(&mut self) -> Result<()> {
info!("Receive MESSAGE_BITFIELD from peer {:?}", self.peer.id);
let message: Message = self.read_message()?;
if message.id != MESSAGE_BITFIELD {
return Err(anyhow!("received invalid MESSAGE_BITFIELD from peer"));
}
// Update bitfield
self.bitfield = message.payload.to_vec();
Ok(())
}
/// Send REQUEST message to remote peer.
///
/// The request message is fixed length, and is used to request a block.
///
/// # Arguments
///
/// * `index` - The zero-based piece index.
/// * `begin` - The zero-based byte offset within the piece.
/// * `length` - The requested length.
///
pub fn send_request(&mut self, index: u32, begin: u32, length: u32) -> Result<()> {
let mut payload: Vec<u8> = vec![];
payload.write_u32::<BigEndian>(index)?;
payload.write_u32::<BigEndian>(begin)?;
payload.write_u32::<BigEndian>(length)?;
let message: Message = Message::new_with_payload(MESSAGE_REQUEST, payload);
let message_encoded = message.serialize()?;
info!(
"Send MESSAGE_REQUEST for piece {:?} [{:?}:{:?}] to peer {:?}",
index,
begin,
begin + length,
self.peer.id
);
        if self.conn.write_all(&message_encoded).is_err() {
return Err(anyhow!("could not send MESSAGE_REQUEST to peer"));
}
Ok(())
}
/// Read PIECE message from remote peer.
///
/// The message payload contains the following information:
/// - index: integer specifying the zero-based piece index
/// - begin: integer specifying the zero-based byte offset within the piece
/// - block: block of data, which is a subset of the piece specified by index.
///
/// # Arguments
///
/// * `message` - The message to parse.
/// * `piece_work` - A work piece.
///
pub fn read_piece(&mut self, message: Message, piece_work: &mut PieceWork) -> Result<()> {
info!("Receive MESSAGE_PIECE from peer {:?}", self.peer.id);
// Check if message id and payload are valid
if message.id != MESSAGE_PIECE || message.payload.to_vec().len() < 8 {
return Err(anyhow!("received invalid MESSAGE_HAVE from peer"));
}
// Get message payload
let payload: Vec<u8> = message.payload.to_vec();
// Get piece index
let mut payload_cursor = Cursor::new(&payload[0..4]);
let index = payload_cursor.read_u32::<BigEndian>()?;
// Check if piece index is valid
if index != piece_work.index {
return Err(anyhow!("received invalid piece from peer"));
}
// Get byte offset within piece
let mut payload_cursor = Cursor::new(&payload[4..8]);
let begin: u32 = payload_cursor.read_u32::<BigEndian>()?;
// Get piece block
let block: Vec<u8> = payload[8..].to_vec();
let block_len: u32 = block.len() as u32;
// Check if byte offset is valid
if begin + block_len > piece_work.length as u32 {
return Err(anyhow!(
"received invalid byte offset within piece from peer"
));
}
info!(
"Download piece {:?} [{:?}:{:?}] from peer {:?}",
index,
begin,
begin + block_len,
self.peer.id
);
// Add block to piece data
for i in 0..block_len {
piece_work.data[begin as usize + i as usize] = block[i as usize];
}
// Update downloaded data counter
piece_work.downloaded += block_len;
// Update requests counter
piece_work.requests -= 1;
Ok(())
}
}
| 32.243363 | 132 | 0.585152 |
696e64a3100ccb37751c67e51febc8d3acafbd7b | 7,905 | use lazy_static::lazy_static;
use crate::error::Result;
use bellperson::gadgets::{boolean, num};
use bellperson::{ConstraintSystem, SynthesisError};
use fil_sapling_crypto::jubjub::JubjubEngine;
use generic_array::typenum;
use generic_array::typenum::{U1, U11, U16, U2, U24, U36, U4, U8};
use merkletree::hash::{Algorithm as LightAlgorithm, Hashable as LightHashable};
use merkletree::merkle::Element;
use neptune::poseidon::PoseidonConstants;
use paired::bls12_381::{Bls12, Fr, FrRepr};
use paired::Engine;
use serde::de::DeserializeOwned;
use serde::ser::Serialize;
pub type PoseidonBinaryArity = U2;
pub type PoseidonQuadArity = U4;
pub type PoseidonOctArity = U8;
/// Arity to use by default for `hash_md` with poseidon.
pub type PoseidonMDArity = U36;
/// Arity to use for hasher implementations (Poseidon) which are specialized at compile time.
/// Must match PoseidonArity
pub const MERKLE_TREE_ARITY: usize = 2;
lazy_static! {
pub static ref POSEIDON_CONSTANTS_1: PoseidonConstants::<Bls12, U1> = PoseidonConstants::new();
pub static ref POSEIDON_CONSTANTS_2: PoseidonConstants::<Bls12, U2> = PoseidonConstants::new();
pub static ref POSEIDON_CONSTANTS_4: PoseidonConstants::<Bls12, U4> = PoseidonConstants::new();
pub static ref POSEIDON_CONSTANTS_8: PoseidonConstants::<Bls12, U8> = PoseidonConstants::new();
pub static ref POSEIDON_CONSTANTS_16: PoseidonConstants::<Bls12, U16> =
PoseidonConstants::new();
pub static ref POSEIDON_CONSTANTS_24: PoseidonConstants::<Bls12, U24> =
PoseidonConstants::new();
pub static ref POSEIDON_CONSTANTS_36: PoseidonConstants::<Bls12, U36> =
PoseidonConstants::new();
pub static ref POSEIDON_CONSTANTS_11: PoseidonConstants::<Bls12, U11> =
PoseidonConstants::new();
pub static ref POSEIDON_MD_CONSTANTS: PoseidonConstants::<Bls12, PoseidonMDArity> =
PoseidonConstants::new();
}
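/// An arity for which a set of static Poseidon constants is available, so
/// hashers specialized at compile time can fetch their parameters without
/// rebuilding them.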
pub trait PoseidonArity<E: Engine>:
typenum::Unsigned
+ Send
+ Sync
+ Clone
+ std::ops::Add<typenum::B1>
+ std::ops::Add<typenum::UInt<typenum::UTerm, typenum::B1>>
where
typenum::Add1<Self>: generic_array::ArrayLength<E::Fr>,
{
#[allow(non_snake_case)]
fn PARAMETERS() -> &'static PoseidonConstants<E, Self>;
}
impl PoseidonArity<Bls12> for U1 {
fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {
&*POSEIDON_CONSTANTS_1
}
}
impl PoseidonArity<Bls12> for U2 {
fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {
&*POSEIDON_CONSTANTS_2
}
}
impl PoseidonArity<Bls12> for U4 {
fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {
&*POSEIDON_CONSTANTS_4
}
}
impl PoseidonArity<Bls12> for U8 {
fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {
&*POSEIDON_CONSTANTS_8
}
}
impl PoseidonArity<Bls12> for U11 {
fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {
&*POSEIDON_CONSTANTS_11
}
}
impl PoseidonArity<Bls12> for U16 {
fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {
&*POSEIDON_CONSTANTS_16
}
}
impl PoseidonArity<Bls12> for U24 {
fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {
&*POSEIDON_CONSTANTS_24
}
}
impl PoseidonArity<Bls12> for U36 {
fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {
&*POSEIDON_CONSTANTS_36
}
}
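/// An engine paired with static Poseidon constants for a given arity;
/// blanket-implemented below for every `PoseidonArity`.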
pub trait PoseidonEngine<Arity>: Engine
where
Arity: 'static
+ typenum::Unsigned
+ std::ops::Add<typenum::B1>
+ std::ops::Add<typenum::UInt<typenum::UTerm, typenum::B1>>,
typenum::Add1<Arity>: generic_array::ArrayLength<Self::Fr>,
{
#[allow(non_snake_case)]
fn PARAMETERS() -> &'static PoseidonConstants<Self, Arity>;
}
impl<E: Engine, U: 'static + PoseidonArity<E>> PoseidonEngine<U> for E
where
typenum::Add1<U>: generic_array::ArrayLength<E::Fr>,
{
fn PARAMETERS() -> &'static PoseidonConstants<Self, U> {
PoseidonArity::PARAMETERS()
}
}
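/// The domain a `Hasher` operates over: a value that converts to and from
/// `Fr`, orders, hashes, and round-trips through raw little-endian bytes.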
pub trait Domain:
Ord
+ Copy
+ Clone
+ AsRef<[u8]>
+ Default
+ ::std::fmt::Debug
+ Eq
+ Send
+ Sync
+ From<Fr>
+ From<FrRepr>
+ Into<Fr>
+ Serialize
+ DeserializeOwned
+ Element
+ std::hash::Hash
{
fn serialize(&self) -> Vec<u8>;
fn into_bytes(&self) -> Vec<u8>;
fn try_from_bytes(raw: &[u8]) -> Result<Self>;
/// Write itself into the given slice, LittleEndian bytes.
fn write_bytes(&self, _: &mut [u8]) -> Result<()>;
fn random<R: rand::RngCore>(rng: &mut R) -> Self;
}
pub trait HashFunction<T: Domain>:
Clone + ::std::fmt::Debug + Send + Sync + LightAlgorithm<T>
{
fn hash(data: &[u8]) -> T;
fn hash2(a: &T, b: &T) -> T;
fn hash_md(input: &[T]) -> T {
// Default to binary.
assert!(input.len() > 1, "hash_md needs more than one element.");
input
.iter()
.skip(1)
.fold(input[0], |acc, elt| Self::hash2(&acc, elt))
}
fn hash_leaf(data: &dyn LightHashable<Self>) -> T {
let mut a = Self::default();
data.hash(&mut a);
let item_hash = a.hash();
a.leaf(item_hash)
}
fn hash_single_node(data: &dyn LightHashable<Self>) -> T {
let mut a = Self::default();
data.hash(&mut a);
a.hash()
}
fn hash_leaf_circuit<E: JubjubEngine + PoseidonEngine<typenum::U2>, CS: ConstraintSystem<E>>(
mut cs: CS,
left: &num::AllocatedNum<E>,
right: &num::AllocatedNum<E>,
height: usize,
params: &E::Params,
) -> std::result::Result<num::AllocatedNum<E>, SynthesisError> {
let left_bits = left.to_bits_le(cs.namespace(|| "left num into bits"))?;
let right_bits = right.to_bits_le(cs.namespace(|| "right num into bits"))?;
Self::hash_leaf_bits_circuit(cs, &left_bits, &right_bits, height, params)
}
fn hash_multi_leaf_circuit<
Arity: 'static + PoseidonArity<E>,
E: JubjubEngine + PoseidonEngine<Arity>,
CS: ConstraintSystem<E>,
>(
cs: CS,
leaves: &[num::AllocatedNum<E>],
height: usize,
params: &E::Params,
) -> std::result::Result<num::AllocatedNum<E>, SynthesisError>
where
typenum::Add1<Arity>: generic_array::ArrayLength<E::Fr>;
fn hash_md_circuit<
E: JubjubEngine + PoseidonEngine<PoseidonMDArity>,
CS: ConstraintSystem<E>,
>(
_cs: &mut CS,
_elements: &[num::AllocatedNum<E>],
) -> std::result::Result<num::AllocatedNum<E>, SynthesisError> {
unimplemented!();
}
fn hash_leaf_bits_circuit<E: JubjubEngine, CS: ConstraintSystem<E>>(
_cs: CS,
_left: &[boolean::Boolean],
_right: &[boolean::Boolean],
_height: usize,
_params: &E::Params,
) -> std::result::Result<num::AllocatedNum<E>, SynthesisError> {
unimplemented!();
}
fn hash_circuit<E: JubjubEngine, CS: ConstraintSystem<E>>(
cs: CS,
bits: &[boolean::Boolean],
params: &E::Params,
) -> std::result::Result<num::AllocatedNum<E>, SynthesisError>;
fn hash2_circuit<E, CS>(
cs: CS,
a: &num::AllocatedNum<E>,
b: &num::AllocatedNum<E>,
params: &E::Params,
) -> std::result::Result<num::AllocatedNum<E>, SynthesisError>
where
E: JubjubEngine + PoseidonEngine<typenum::U2>,
CS: ConstraintSystem<E>;
}
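/// Pairs a hash `Domain` with the `Function` that produces values in it,
/// plus the labeling and sloth encode/decode helpers built on top of them.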
pub trait Hasher: Clone + ::std::fmt::Debug + Eq + Default + Send + Sync {
type Domain: Domain + LightHashable<Self::Function> + AsRef<Self::Domain>;
type Function: HashFunction<Self::Domain>;
fn create_label(data: &[u8], m: usize) -> Result<Self::Domain>;
fn sloth_encode(key: &Self::Domain, ciphertext: &Self::Domain) -> Result<Self::Domain>;
fn sloth_decode(key: &Self::Domain, ciphertext: &Self::Domain) -> Result<Self::Domain>;
fn name() -> String;
}
| 31.369048 | 99 | 0.637192 |
0a592ccf61f78c322612247c27a50bf7e49e5d75 | 406 | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
mod decl;
mod expand;
mod ty;
mod variance;
pub use decl::ClassElt;
pub use expand::{ExpandEnv, TypeExpansion, TypeExpansions};
pub use ty::{Exact, FunParam, FunType, ParamMode, Prim, Ty, Ty_};
pub use variance::Variance;
| 29 | 66 | 0.738916 |
dd21e2d77f05f275e140786e148a53ffecb678c4 | 148,510 | // Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
//! TBD: Currently, `sql::func` handles matching arguments to their respective
//! built-in functions (for most built-in functions, at least).
use std::cell::RefCell;
use std::collections::HashMap;
use std::fmt;
use itertools::Itertools;
use lazy_static::lazy_static;
use expr::func;
use ore::collections::CollectionExt;
use pgrepr::oid;
use repr::{ColumnName, ColumnType, Datum, RelationType, Row, ScalarBaseType, ScalarType};
use crate::ast::{SelectStatement, Statement};
use crate::names::PartialName;
use crate::plan::error::PlanError;
use crate::plan::expr::{
AggregateFunc, BinaryFunc, CoercibleScalarExpr, ColumnOrder, HirRelationExpr, HirScalarExpr,
NullaryFunc, ScalarWindowFunc, TableFunc, UnaryFunc, VariadicFunc,
};
use crate::plan::query::{self, ExprContext, QueryContext, QueryLifetime};
use crate::plan::scope::Scope;
use crate::plan::transform_ast;
use crate::plan::typeconv::{self, CastContext};
use crate::plan::StatementContext;
/// A specifier for a function or an operator.
#[derive(Clone, Copy, Debug)]
pub enum FuncSpec<'a> {
/// A function name.
Func(&'a PartialName),
/// An operator name.
Op(&'a str),
}
impl<'a> fmt::Display for FuncSpec<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
FuncSpec::Func(n) => n.fmt(f),
FuncSpec::Op(o) => o.fmt(f),
}
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
/// Mirrored from [PostgreSQL's `typcategory`][typcategory].
///
/// Note that Materialize also uses a number of pseudotypes when planning, but
/// we have yet to need to integrate them with `TypeCategory`.
///
/// [typcategory]:
/// https://www.postgresql.org/docs/9.6/catalog-pg-type.html#CATALOG-TYPCATEGORY-TABLE
pub enum TypeCategory {
Array,
Bool,
DateTime,
List,
Numeric,
Pseudo,
String,
Timespan,
UserDefined,
}
impl TypeCategory {
/// Extracted from PostgreSQL 9.6.
/// ```sql,ignore
/// SELECT array_agg(typname), typcategory
/// FROM pg_catalog.pg_type
/// WHERE typname IN (
/// 'bool', 'bytea', 'date', 'float4', 'float8', 'int4', 'int8', 'interval', 'jsonb',
/// 'numeric', 'text', 'time', 'timestamp', 'timestamptz'
/// )
/// GROUP BY typcategory
/// ORDER BY typcategory;
/// ```
fn from_type(typ: &ScalarType) -> Self {
match typ {
ScalarType::Array(..) => Self::Array,
ScalarType::Bool => Self::Bool,
ScalarType::Bytes | ScalarType::Jsonb | ScalarType::Uuid => Self::UserDefined,
ScalarType::Date
| ScalarType::Time
| ScalarType::Timestamp
| ScalarType::TimestampTz => Self::DateTime,
ScalarType::Float32
| ScalarType::Float64
| ScalarType::Int16
| ScalarType::Int32
| ScalarType::Int64
| ScalarType::Oid
| ScalarType::RegClass
| ScalarType::RegProc
| ScalarType::RegType
| ScalarType::Numeric { .. } => Self::Numeric,
ScalarType::Interval => Self::Timespan,
ScalarType::List { .. } => Self::List,
ScalarType::String | ScalarType::Char { .. } | ScalarType::VarChar { .. } => {
Self::String
}
ScalarType::Record { .. } => Self::Pseudo,
ScalarType::Map { .. } => Self::Pseudo,
}
}
fn from_param(param: &ParamType) -> Self {
match param {
ParamType::Any
| ParamType::ArrayAny
| ParamType::ArrayElementAny
| ParamType::ListAny
| ParamType::ListElementAny
| ParamType::NonVecAny
| ParamType::MapAny
| ParamType::RecordAny => Self::Pseudo,
ParamType::Plain(t) => Self::from_type(t),
}
}
/// Extracted from PostgreSQL 9.6.
/// ```ignore
/// SELECT typcategory, typname, typispreferred
/// FROM pg_catalog.pg_type
/// WHERE typispreferred = true
/// ORDER BY typcategory;
/// ```
fn preferred_type(&self) -> Option<ScalarType> {
match self {
Self::Array | Self::List | Self::Pseudo | Self::UserDefined => None,
Self::Bool => Some(ScalarType::Bool),
Self::DateTime => Some(ScalarType::TimestampTz),
Self::Numeric => Some(ScalarType::Float64),
Self::String => Some(ScalarType::String),
Self::Timespan => Some(ScalarType::Interval),
}
}
}
/// Builds an expression that evaluates a scalar function on the provided
/// input expressions.
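///
/// For instance, a catalog entry for a simple unary function can be built
/// as below (a sketch; `SomeUnaryFunc` stands in for a real `UnaryFunc`
/// variant):
///
/// ```ignore
/// Operation::unary(|_ecx, e| Ok(e.call_unary(SomeUnaryFunc)))
/// ```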
struct Operation<R>(
Box<
dyn Fn(
&ExprContext,
Vec<CoercibleScalarExpr>,
&ParamList,
Vec<ColumnOrder>,
) -> Result<R, PlanError>
+ Send
+ Sync,
>,
);
impl Operation<HirScalarExpr> {
/// Builds a unary operation that simply returns its input.
fn identity() -> Operation<HirScalarExpr> {
Operation::unary(|_ecx, e| Ok(e))
}
}
impl<R: GetReturnType> Operation<R> {
fn new<F>(f: F) -> Operation<R>
where
F: Fn(
&ExprContext,
Vec<CoercibleScalarExpr>,
&ParamList,
Vec<ColumnOrder>,
) -> Result<R, PlanError>
+ Send
+ Sync
+ 'static,
{
Operation(Box::new(f))
}
/// Builds an operation that takes no arguments.
fn nullary<F>(f: F) -> Operation<R>
where
F: Fn(&ExprContext) -> Result<R, PlanError> + Send + Sync + 'static,
{
Self::variadic(move |ecx, exprs| {
assert!(exprs.is_empty());
f(ecx)
})
}
/// Builds an operation that takes one argument.
fn unary<F>(f: F) -> Operation<R>
where
F: Fn(&ExprContext, HirScalarExpr) -> Result<R, PlanError> + Send + Sync + 'static,
{
Self::variadic(move |ecx, exprs| f(ecx, exprs.into_element()))
}
/// Builds an operation that takes one argument and an order_by.
fn unary_ordered<F>(f: F) -> Operation<R>
where
F: Fn(&ExprContext, HirScalarExpr, Vec<ColumnOrder>) -> Result<R, PlanError>
+ Send
+ Sync
+ 'static,
{
Self::new(move |ecx, cexprs, params, order_by| {
let exprs = coerce_args_to_types(ecx, cexprs, params)?;
f(ecx, exprs.into_element(), order_by)
})
}
/// Builds an operation that takes two arguments.
fn binary<F>(f: F) -> Operation<R>
where
F: Fn(&ExprContext, HirScalarExpr, HirScalarExpr) -> Result<R, PlanError>
+ Send
+ Sync
+ 'static,
{
Self::variadic(move |ecx, exprs| {
assert_eq!(exprs.len(), 2);
let mut exprs = exprs.into_iter();
let left = exprs.next().unwrap();
let right = exprs.next().unwrap();
f(ecx, left, right)
})
}
/// Builds an operation that takes two arguments and an order_by.
fn binary_ordered<F>(f: F) -> Operation<R>
where
F: Fn(&ExprContext, HirScalarExpr, HirScalarExpr, Vec<ColumnOrder>) -> Result<R, PlanError>
+ Send
+ Sync
+ 'static,
{
Self::new(move |ecx, cexprs, params, order_by| {
let exprs = coerce_args_to_types(ecx, cexprs, params)?;
assert_eq!(exprs.len(), 2);
let mut exprs = exprs.into_iter();
let left = exprs.next().unwrap();
let right = exprs.next().unwrap();
f(ecx, left, right, order_by)
})
}
/// Builds an operation that takes any number of arguments.
fn variadic<F>(f: F) -> Operation<R>
where
F: Fn(&ExprContext, Vec<HirScalarExpr>) -> Result<R, PlanError> + Send + Sync + 'static,
{
Self::new(move |ecx, cexprs, params, _order_by| {
let exprs = coerce_args_to_types(ecx, cexprs, params)?;
f(ecx, exprs)
})
}
}
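// As an illustrative sketch (hedged: `BinaryFunc::AddInt32` is assumed from
// the expression crate and is not otherwise used in this file), a built-in
// that increments its argument could be written with these builders as:
//
//     Operation::unary(|_ecx, e| {
//         let one = HirScalarExpr::literal(Datum::Int32(1), ScalarType::Int32);
//         Ok(e.call_binary(one, BinaryFunc::AddInt32))
//     })
//
// In practice, such definitions are registered via the `builtins!` table
// below rather than constructed ad hoc.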
/// Backing implementation for sql_impl_func and sql_impl_cast. See those
/// functions for details.
pub fn sql_impl(
expr: &'static str,
) -> impl Fn(&QueryContext, Vec<ScalarType>) -> Result<HirScalarExpr, PlanError> {
let expr =
sql_parser::parser::parse_expr(expr).expect("static function definition failed to parse");
move |qcx, types| {
// Reconstruct an expression context where the parameter types are
// bound to the types of the expressions in `args`.
let mut scx = qcx.scx.clone();
scx.param_types = RefCell::new(
types
.into_iter()
.enumerate()
.map(|(i, ty)| (i + 1, ty))
.collect(),
);
let mut qcx = QueryContext::root(&scx, qcx.lifetime);
// Desugar the expression
let mut expr = expr.clone();
transform_ast::transform_expr(&scx, &mut expr)?;
let expr = query::resolve_names_expr(&mut qcx, expr)?;
let ecx = ExprContext {
qcx: &qcx,
name: "static function definition",
scope: &Scope::empty(),
relation_type: &RelationType::empty(),
allow_aggregates: false,
allow_subqueries: true,
allow_windows: false,
};
// Plan the expression.
query::plan_expr(&ecx, &expr)?.type_as_any(&ecx)
}
}
// Constructs a definition for a built-in function out of a static SQL
// expression.
//
// The SQL expression should use the standard parameter syntax (`$1`, `$2`, ...)
// to refer to the inputs to the function. For example, a built-in function that
// takes two arguments and concatenates them with an arrow in between could be
// defined like so:
//
// sql_impl_func("$1 || '<->' || $2")
//
// The number of parameters in the SQL expression must exactly match the number
// of parameters in the built-in's declaration. There is no support for variadic
// functions.
fn sql_impl_func(expr: &'static str) -> Operation<HirScalarExpr> {
let invoke = sql_impl(expr);
Operation::variadic(move |ecx, args| {
let types = args.iter().map(|arg| ecx.scalar_type(arg)).collect();
let mut out = invoke(&ecx.qcx, types)?;
out.splice_parameters(&args, 0);
Ok(out)
})
}
// Defines a built-in table function from a static SQL SELECT statement.
//
// The SQL statement should use the standard parameter syntax (`$1`, `$2`, ...)
// to refer to the inputs to the function; see sql_impl_func for an example.
//
// The number of parameters in the SQL expression must exactly match the number
// of parameters in the built-in's declaration. There is no support for variadic
// functions.
//
// As this is a full SQL statement, it returns a set of rows, similar to a
// table function. The SELECT's projection's names are used and should be
// aliased if needed.
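//
// For example, a hypothetical single-column table function (a sketch only;
// not registered anywhere in this file) could be defined as:
//
//     sql_impl_table_func("SELECT $1 + 1 AS incremented")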
fn sql_impl_table_func_inner(
sql: &'static str,
experimental: Option<&'static str>,
) -> Operation<TableFuncPlan> {
let query = match sql_parser::parser::parse_statements(sql)
.expect("static function definition failed to parse")
.expect_element("static function definition must have exactly one statement")
{
Statement::Select(SelectStatement { query, as_of: None }) => query,
_ => panic!("static function definition expected SELECT statement"),
};
let invoke = move |qcx: &QueryContext, types: Vec<ScalarType>| {
// Reconstruct an expression context where the parameter types are
// bound to the types of the expressions in `args`.
let mut scx = qcx.scx.clone();
scx.param_types = RefCell::new(
types
.into_iter()
.enumerate()
.map(|(i, ty)| (i + 1, ty))
.collect(),
);
let mut qcx = QueryContext::root(&scx, qcx.lifetime);
let mut query = query.clone();
transform_ast::transform_query(&scx, &mut query)?;
let query = query::resolve_names(&mut qcx, query)?;
query::plan_nested_query(&mut qcx, &query)
};
Operation::variadic(move |ecx, args| {
if let Some(feature_name) = experimental {
ecx.require_experimental_mode(feature_name)?;
}
let types = args.iter().map(|arg| ecx.scalar_type(arg)).collect();
let (mut expr, scope) = invoke(&ecx.qcx, types)?;
expr.splice_parameters(&args, 0);
Ok(TableFuncPlan {
expr,
column_names: scope.column_names().cloned().collect(),
})
})
}
fn sql_impl_table_func(sql: &'static str) -> Operation<TableFuncPlan> {
sql_impl_table_func_inner(sql, None)
}
fn experimental_sql_impl_table_func(
feature: &'static str,
sql: &'static str,
) -> Operation<TableFuncPlan> {
sql_impl_table_func_inner(sql, Some(feature))
}
/// Describes a single function's implementation.
pub struct FuncImpl<R> {
oid: u32,
params: ParamList,
return_type: ReturnType,
op: Operation<R>,
}
/// Describes how each implementation should be represented in the catalog.
#[derive(Debug)]
pub struct FuncImplCatalogDetails {
pub oid: u32,
pub arg_oids: Vec<u32>,
pub variadic_oid: Option<u32>,
pub return_oid: Option<u32>,
pub return_is_set: bool,
}
impl<R: GetReturnType> FuncImpl<R> {
fn details(&self) -> FuncImplCatalogDetails {
FuncImplCatalogDetails {
oid: self.oid,
arg_oids: self.params.arg_oids(),
variadic_oid: self.params.variadic_oid(),
return_oid: self.return_type.typ.as_ref().map(|t| t.oid()),
return_is_set: self.return_type.is_set_of,
}
}
}
impl<R> fmt::Debug for FuncImpl<R> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("FuncImpl")
.field("oid", &self.oid)
.field("params", &self.params)
.field("ret", &self.return_type)
.field("op", &"<omitted>")
.finish()
}
}
impl From<NullaryFunc> for Operation<HirScalarExpr> {
fn from(n: NullaryFunc) -> Operation<HirScalarExpr> {
Operation::nullary(move |_ecx| Ok(HirScalarExpr::CallNullary(n.clone())))
}
}
impl From<UnaryFunc> for Operation<HirScalarExpr> {
fn from(u: UnaryFunc) -> Operation<HirScalarExpr> {
Operation::unary(move |_ecx, e| Ok(e.call_unary(u.clone())))
}
}
impl From<BinaryFunc> for Operation<HirScalarExpr> {
fn from(b: BinaryFunc) -> Operation<HirScalarExpr> {
Operation::binary(move |_ecx, left, right| Ok(left.call_binary(right, b.clone())))
}
}
impl From<VariadicFunc> for Operation<HirScalarExpr> {
fn from(v: VariadicFunc) -> Operation<HirScalarExpr> {
Operation::variadic(move |_ecx, exprs| {
Ok(HirScalarExpr::CallVariadic {
func: v.clone(),
exprs,
})
})
}
}
impl From<AggregateFunc> for Operation<(HirScalarExpr, AggregateFunc)> {
fn from(a: AggregateFunc) -> Operation<(HirScalarExpr, AggregateFunc)> {
Operation::unary(move |_ecx, e| Ok((e, a.clone())))
}
}
impl From<ScalarWindowFunc> for Operation<ScalarWindowFunc> {
fn from(a: ScalarWindowFunc) -> Operation<ScalarWindowFunc> {
Operation::nullary(move |_ecx| Ok(a.clone()))
}
}
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
/// Describes possible types of function parameters.
///
/// Note that this is not exhaustive and will likely require additions.
pub enum ParamList {
Exact(Vec<ParamType>),
Variadic(ParamType),
}
impl ParamList {
/// Determines whether `typs` are compatible with `self`.
fn matches_argtypes(&self, ecx: &ExprContext, typs: &[Option<ScalarType>]) -> bool {
if !self.validate_arg_len(typs.len()) {
return false;
}
for (i, typ) in typs.iter().enumerate() {
let param = &self[i];
if let Some(typ) = typ {
                // Ensures that `typ` can at least be implicitly cast to a
                // type `param` accepts. Implicit in this check is that unknown
// type arguments can be cast to any type.
//
// N.B. this will require more fallthrough checks once we
// support RECORD types in functions.
if !param.accepts_type(ecx, typ) {
return false;
}
}
}
!self.has_polymorphic() || self.resolve_polymorphic_types(typs).is_some()
}
    /// Validates that the number of input elements is viable for `self`.
fn validate_arg_len(&self, input_len: usize) -> bool {
match self {
Self::Exact(p) => p.len() == input_len,
Self::Variadic(_) => input_len > 0,
}
}
/// Reports whether the parameter list contains any polymorphic parameters.
fn has_polymorphic(&self) -> bool {
match self {
ParamList::Exact(p) => p.iter().any(|p| p.is_polymorphic()),
ParamList::Variadic(p) => p.is_polymorphic(),
}
}
/// Enforces polymorphic type consistency by finding the concrete type that
/// satisfies the constraints expressed by the polymorphic types in the
/// parameter list.
///
/// Polymorphic type consistency constraints include:
/// - All arguments passed to `ArrayAny` must be `ScalarType::Array`s with
/// the same types of elements. All arguments passed to `ArrayElementAny`
/// must also be of these elements' type.
/// - All arguments passed to `ListAny` must be `ScalarType::List`s with the
/// same types of elements. All arguments passed to `ListElementAny` must
/// also be of these elements' type.
/// - All arguments passed to `MapAny` must be `ScalarType::Map`s with the
/// same type of value in each key, value pair.
///
/// Returns `Some` if the constraints were successfully resolved, or `None`
/// otherwise.
///
/// ## Custom types
///
/// Materialize supports two classes of types:
/// - Custom types, which are defined by `CREATE TYPE` or contain a
/// reference to a type that was.
/// - Built-in types, which are all other types, e.g. `int4`, `int4 list`.
///
/// Among built-in types there are:
/// - Complex types, which contain references to other types
    /// - Simple types, which do not contain references to other types
///
/// To support accepting custom type values passed to polymorphic
/// parameters, we must handle polymorphism for custom types. To understand
/// how we assess custom types' polymorphism, it's useful to categorize
/// polymorphic parameters in MZ.
///
/// - **Complex parameters** include complex built-in types' polymorphic
/// parameters, e.g. `ListAny` and `MapAny`.
///
/// Valid `ScalarType`s passed to these parameters have a `custom_oid`
/// field and some embedded type, which we'll refer to as its element.
///
/// - **Element parameters** which include `ArrayElementAny`,
/// `ListElementAny` and `NonVecAny`.
///
/// Note that:
/// - Custom types can be used as values for either complex or element
/// parameters; we'll refer to these as custom complex values and custom
/// element values, or collectively as custom values.
/// - `ArrayAny` is slightly different from either case, but is uncommonly
/// used and not addressed further.
///
/// ### Resolution
///
/// - Upon encountering the first custom complex value:
/// - All other custom complex types must exactly match both its
/// `custom_oid` and embedded element.
/// - All custom element types must exactly match its embedded element
/// type.
///
    ///   One of the complexities here is that the custom complex value's
    ///   element can be a built-in type, meaning any custom element values
    ///   will cause polymorphic resolution to fail.
///
/// - Upon encountering the first custom element value:
/// - All other custom element values must exactly match its type.
/// - All custom complex types' embedded elements must exactly match its
/// type.
///
/// ### Custom + built-in types
///
/// If you use both custom and built-in types, the resultant type will be
/// the least-custom custom type that fulfills the above requirements.
///
    /// For example, if you `list_append(int4 list list, custom_int4_list)`, the
    /// resultant type will be complex: its `custom_oid` will be `None`, but its
    /// embedded element will be the custom element type, i.e. the result is a
    /// `custom_int4_list list`.
///
    /// However, it's also important to note that a complex value whose
    /// `custom_oid` is `None` is still considered complex if its embedded
    /// element is complex. Consider the following scenario:
///
/// ```sql
/// CREATE TYPE int4_list_custom AS LIST (element_type=int4);
/// CREATE TYPE int4_list_list_custom AS LIST (element_type=int4_list_custom);
/// /* Errors because we won't coerce int4_list_custom list to
/// int4_list_list_custom */
/// SELECT '{{1}}'::int4_list_list_custom || '{{2}}'::int4_list_custom list;
/// ```
///
/// We will not coerce `int4_list_custom list` to
/// `int4_list_list_custom`––only built-in types are ever coerced into
/// custom types. It's also trivial for users to add a cast to ensure custom
/// type consistency.
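    ///
    /// ### Example
    ///
    /// A hedged sketch of the simplest resolution path (field names follow
    /// this file's `ScalarType`): given parameters `(ListAny,
    /// ListElementAny)` and argument types `(int4 list, unknown)`, the known
    /// `int4 list` argument constrains the polymorphic parameters, so the
    /// unknown argument is later coerced to `int4`.
    ///
    /// ```ignore
    /// let params = ParamList::Exact(vec![ParamType::ListAny, ParamType::ListElementAny]);
    /// let int4_list = ScalarType::List {
    ///     custom_oid: None,
    ///     element_type: Box::new(ScalarType::Int32),
    /// };
    /// assert_eq!(
    ///     params.resolve_polymorphic_types(&[Some(int4_list.clone()), None]),
    ///     Some(int4_list),
    /// );
    /// ```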
fn resolve_polymorphic_types(&self, typs: &[Option<ScalarType>]) -> Option<ScalarType> {
// Determines if types have the same [`ScalarBaseType`], and if complex
// types' elements do, as well.
fn complex_base_eq(l: &ScalarType, r: &ScalarType) -> bool {
match (l, r) {
(ScalarType::Array(l), ScalarType::Array(r))
| (
ScalarType::List {
element_type: l, ..
},
ScalarType::List {
element_type: r, ..
},
)
| (ScalarType::Map { value_type: l, .. }, ScalarType::Map { value_type: r, .. }) => {
complex_base_eq(l, r)
}
(l, r) => ScalarBaseType::from(l) == ScalarBaseType::from(r),
}
}
let mut custom_oid_lock = false;
let mut element_lock = false;
let mut constrained_type: Option<ScalarType> = None;
// Determine the element on which to constrain the parameters.
for (i, typ) in typs.iter().enumerate() {
let param = &self[i];
match (param, typ, &mut constrained_type) {
(ParamType::ArrayAny, Some(typ), None) => {
constrained_type = Some(typ.clone());
}
(ParamType::ArrayAny, Some(typ), Some(constrained)) => {
if !complex_base_eq(typ, constrained) {
return None;
}
}
(ParamType::ListAny, Some(typ), None) | (ParamType::MapAny, Some(typ), None) => {
constrained_type = Some(typ.clone());
custom_oid_lock = typ.is_custom_type();
element_lock = typ.is_custom_type();
}
(ParamType::ListAny, Some(typ), Some(constrained))
| (ParamType::MapAny, Some(typ), Some(constrained)) => {
let element_accessor = match typ {
ScalarType::List { .. } => ScalarType::unwrap_list_element_type,
ScalarType::Map { .. } => ScalarType::unwrap_map_value_type,
_ => unreachable!(),
};
if (custom_oid_lock && typ.is_custom_type() && typ != constrained)
|| (element_lock
&& typ.is_custom_type()
&& element_accessor(typ) != element_accessor(constrained))
|| !complex_base_eq(typ, constrained)
{
return None;
}
if typ.is_custom_type() && !custom_oid_lock {
constrained_type = Some(typ.clone());
custom_oid_lock = true;
element_lock = true;
}
}
(ParamType::ArrayElementAny, Some(t), None) => {
constrained_type = Some(ScalarType::Array(Box::new(t.clone())));
element_lock = t.is_custom_type();
}
(ParamType::ArrayElementAny, Some(t), Some(constrained)) => {
let constrained_element_type = constrained.unwrap_array_element_type();
if (element_lock && t.is_custom_type() && t != constrained_element_type)
|| !complex_base_eq(t, &constrained_element_type)
{
return None;
}
if t.is_custom_type() && !element_lock {
constrained_type = Some(ScalarType::Array(Box::new(t.clone())));
element_lock = true;
}
}
(ParamType::ListElementAny, Some(t), None) => {
constrained_type = Some(ScalarType::List {
custom_oid: None,
element_type: Box::new(t.clone()),
});
element_lock = t.is_custom_type();
}
(ParamType::ListElementAny, Some(t), Some(constrained_list)) => {
let constrained_element_type = constrained_list.unwrap_list_element_type();
if (element_lock && t.is_custom_type() && t != constrained_element_type)
|| !complex_base_eq(t, &constrained_element_type)
{
return None;
}
if t.is_custom_type() && !element_lock {
constrained_type = Some(ScalarType::List {
custom_oid: None,
element_type: Box::new(t.clone()),
});
element_lock = true;
}
}
(ParamType::NonVecAny, Some(t), None) => {
constrained_type = Some(t.clone());
}
(ParamType::NonVecAny, Some(t), Some(constrained)) => {
if !complex_base_eq(t, &constrained) {
return None;
}
}
// These checks don't need to be more exhaustive (e.g. failing
// if arguments passed to `ListAny` are not `ScalarType::List`)
// because we've already done general type checking in
// `matches_argtypes`.
_ => {}
}
}
constrained_type
}
    /// Matches a `&[ScalarType]` derived from the user's function arguments
    /// against this `ParamList`'s permitted arguments.
fn exact_match(&self, types: &[&ScalarType]) -> bool {
types.iter().enumerate().all(|(i, t)| self[i] == **t)
}
    /// Generates the underlying data for `mz_catalog.mz_functions.arg_ids`.
fn arg_oids(&self) -> Vec<u32> {
match self {
ParamList::Exact(p) => p.iter().map(|p| p.oid()).collect::<Vec<_>>(),
ParamList::Variadic(p) => vec![p.oid()],
}
}
/// Generates values for `mz_catalog.mz_functions.variadic_id`.
fn variadic_oid(&self) -> Option<u32> {
match self {
ParamList::Exact(_) => None,
ParamList::Variadic(p) => Some(p.oid()),
}
}
/// Returns a set of `CoercibleScalarExpr`s whose values are literal nulls,
/// typed such that they're compatible with `self`.
///
/// # Panics
///
/// Panics if called on a [`ParamList`] that contains any polymorphic
/// [`ParamType`]s.
fn contrive_coercible_exprs(&self) -> Vec<CoercibleScalarExpr> {
let i = match self {
ParamList::Exact(p) => p.clone(),
ParamList::Variadic(p) => {
vec![p.clone()]
}
};
i.iter()
.map(|p| {
CoercibleScalarExpr::Coerced(HirScalarExpr::literal_null(match p {
ParamType::Plain(t) => t.clone(),
o => unreachable!("o {:?} is polymorphic and doesn't have a ScalarType", o),
}))
})
.collect()
}
}
impl std::ops::Index<usize> for ParamList {
type Output = ParamType;
fn index(&self, i: usize) -> &Self::Output {
match self {
Self::Exact(p) => &p[i],
            Self::Variadic(p) => p,
}
}
}
/// Provides a shorthand for writing `ParamList::Exact`.
impl From<Vec<ParamType>> for ParamList {
fn from(p: Vec<ParamType>) -> ParamList {
ParamList::Exact(p)
}
}
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
/// Describes parameter types; these are essentially just `ScalarType` with some
/// added flexibility.
pub enum ParamType {
/// A pseudotype permitting any type.
Any,
/// A polymorphic pseudotype permitting any array type. For more details,
/// see `ParamList::resolve_polymorphic_types`.
ArrayAny,
/// A polymorphic pseudotype permitting all types, with more constraints
/// than `Any`, i.e. it is subject to polymorphic constraints. For more
/// details, see `ParamList::resolve_polymorphic_types`.
ArrayElementAny,
/// A polymorphic pseudotype permitting a `ScalarType::List` of any element
/// type. For more details, see `ParamList::resolve_polymorphic_types`.
ListAny,
/// A polymorphic pseudotype permitting all types, with more constraints
/// than `Any`, i.e. it is subject to polymorphic constraints. For more
/// details, see `ParamList::resolve_polymorphic_types`.
ListElementAny,
/// A polymorphic pseudotype with the same behavior as `ListElementAny`,
/// except it does not permit either `ScalarType::Array` or
/// `ScalarType::List`.
NonVecAny,
/// A polymorphic pseudotype permitting a `ScalarType::Map` of any non-nested
/// value type. For more details, see `ParamList::resolve_polymorphic_types`.
MapAny,
/// A standard parameter that accepts arguments that match its embedded
/// `ScalarType`.
Plain(ScalarType),
/// A polymorphic pseudotype permitting a `ScalarType::Record` of any type.
/// Currently only used to express return values.
RecordAny,
}
impl ParamType {
/// Does `self` accept arguments of type `t`?
fn accepts_type(&self, ecx: &ExprContext, t: &ScalarType) -> bool {
use ParamType::*;
use ScalarType::*;
match self {
ArrayAny => matches!(t, Array(..)),
ListAny => matches!(t, List { .. }),
Any | ArrayElementAny | ListElementAny => true,
NonVecAny => !t.is_vec(),
MapAny => matches!(t, Map { .. }),
Plain(to) => typeconv::can_cast(ecx, CastContext::Implicit, t.clone(), to.clone()),
RecordAny => unreachable!("not yet supported for input"),
}
}
    /// Does `t`'s [`TypeCategory`] prefer `self`? This question makes more
    /// sense with the understanding that pseudotypes are never preferred.
fn is_preferred_by(&self, t: &ScalarType) -> bool {
if let Some(pt) = TypeCategory::from_type(t).preferred_type() {
*self == pt
} else {
false
}
}
/// Is `self` the preferred parameter type for its `TypeCategory`?
fn prefers_self(&self) -> bool {
if let Some(pt) = TypeCategory::from_param(self).preferred_type() {
*self == pt
} else {
false
}
}
fn is_polymorphic(&self) -> bool {
use ParamType::*;
match self {
ArrayAny | ArrayElementAny | ListAny | MapAny | ListElementAny | NonVecAny
| RecordAny => true,
Any | Plain(_) => false,
}
}
fn oid(&self) -> u32 {
match self {
ParamType::Plain(t) => match t {
ScalarType::List { custom_oid, .. } | ScalarType::Map { custom_oid, .. }
if custom_oid.is_some() =>
{
custom_oid.unwrap()
}
t => {
let t: pgrepr::Type = t.into();
t.oid()
}
},
ParamType::Any => postgres_types::Type::ANY.oid(),
ParamType::ArrayAny => postgres_types::Type::ANYARRAY.oid(),
ParamType::ArrayElementAny => postgres_types::Type::ANYELEMENT.oid(),
ParamType::ListAny => pgrepr::LIST.oid(),
ParamType::ListElementAny => postgres_types::Type::ANYELEMENT.oid(),
ParamType::MapAny => pgrepr::MAP.oid(),
ParamType::NonVecAny => postgres_types::Type::ANYNONARRAY.oid(),
ParamType::RecordAny => postgres_types::Type::RECORD.oid(),
}
}
}
impl PartialEq<ScalarType> for ParamType {
fn eq(&self, other: &ScalarType) -> bool {
match self {
ParamType::Plain(s) => s.base_eq(other),
// Pseudotypes never equal concrete types
_ => false,
}
}
}
impl PartialEq<ParamType> for ScalarType {
fn eq(&self, other: &ParamType) -> bool {
other == self
}
}
impl From<ScalarType> for ParamType {
fn from(s: ScalarType) -> ParamType {
ParamType::Plain(s)
}
}
impl From<ScalarBaseType> for ParamType {
fn from(s: ScalarBaseType) -> ParamType {
use ScalarBaseType::*;
let s = match s {
Array => return ParamType::ArrayAny,
List => return ParamType::ListAny,
Map => return ParamType::MapAny,
Record => return ParamType::RecordAny,
Bool => ScalarType::Bool,
Int16 => ScalarType::Int16,
Int32 => ScalarType::Int32,
Int64 => ScalarType::Int64,
Float32 => ScalarType::Float32,
Float64 => ScalarType::Float64,
Numeric => ScalarType::Numeric { scale: None },
Date => ScalarType::Date,
Time => ScalarType::Time,
Timestamp => ScalarType::Timestamp,
TimestampTz => ScalarType::TimestampTz,
Interval => ScalarType::Interval,
Bytes => ScalarType::Bytes,
String => ScalarType::String,
Char => ScalarType::Char { length: None },
VarChar => ScalarType::VarChar { length: None },
Jsonb => ScalarType::Jsonb,
Uuid => ScalarType::Uuid,
Oid => ScalarType::Oid,
RegClass => ScalarType::RegClass,
RegProc => ScalarType::RegProc,
RegType => ScalarType::RegType,
};
ParamType::Plain(s)
}
}
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct ReturnType {
typ: Option<ParamType>,
is_set_of: bool,
}
impl ReturnType {
/// Expresses that a function's return type is a scalar value.
fn scalar(typ: ParamType) -> ReturnType {
ReturnType {
typ: Some(typ),
is_set_of: false,
}
}
/// Expresses that a function's return type is a set of values, e.g. a table
/// function.
fn set_of(typ: ParamType) -> ReturnType {
ReturnType {
typ: Some(typ),
is_set_of: true,
}
}
}
impl From<ParamType> for ReturnType {
fn from(typ: ParamType) -> ReturnType {
ReturnType::scalar(typ)
}
}
impl From<ScalarBaseType> for ReturnType {
fn from(s: ScalarBaseType) -> ReturnType {
ParamType::from(s).into()
}
}
impl From<ScalarType> for ReturnType {
fn from(s: ScalarType) -> ReturnType {
ParamType::Plain(s).into()
}
}
pub trait GetReturnType {
fn return_type(&self, ecx: &ExprContext, param_list: &ParamList) -> ReturnType;
}
impl GetReturnType for HirScalarExpr {
fn return_type(&self, ecx: &ExprContext, param_list: &ParamList) -> ReturnType {
fn assert_oti_len(oti: &[ColumnType], len: usize, name: &str) {
assert_eq!(
oti.len(),
len,
"{} requires exactly {} contrived input to automatically determine return type",
name,
len,
);
}
let mut output_type_inputs: Vec<ColumnType> = param_list
.contrive_coercible_exprs()
.into_iter()
.map(|c| {
let expr = c.type_as_any(&ecx).expect("c is typed NULL");
ecx.column_type(&expr)
})
.collect();
let c = match self {
HirScalarExpr::Literal(_row, column_type) => column_type.clone(),
HirScalarExpr::CallNullary(func) => {
assert_oti_len(&output_type_inputs, 0, "HirScalarExpr::CallNullary");
func.output_type()
}
HirScalarExpr::CallUnary { func, .. } => {
assert_oti_len(&output_type_inputs, 1, "HirScalarExpr::CallUnary");
func.output_type(output_type_inputs.remove(0))
}
HirScalarExpr::CallBinary { func, .. } => {
assert_oti_len(&output_type_inputs, 2, "HirScalarExpr::CallBinary");
func.output_type(output_type_inputs.remove(0), output_type_inputs.remove(0))
}
HirScalarExpr::CallVariadic { func, .. } => func.output_type(output_type_inputs),
other => unreachable!(
"unexepected HirScalarExpr in Operation<HirScalarExpr>::return_type: {:?}",
other
),
};
ReturnType::scalar(c.scalar_type.into())
}
}
impl GetReturnType for (HirScalarExpr, AggregateFunc) {
fn return_type(&self, ecx: &ExprContext, _param_list: &ParamList) -> ReturnType {
let c = ecx.column_type(&self.0);
let s = self.1.output_type(c).scalar_type;
ReturnType::scalar(s.into())
}
}
impl GetReturnType for ScalarWindowFunc {
fn return_type(&self, _ecx: &ExprContext, _param_list: &ParamList) -> ReturnType {
ReturnType::scalar(self.output_type().scalar_type.into())
}
}
impl GetReturnType for TableFuncPlan {
fn return_type(&self, _ecx: &ExprContext, _param_list: &ParamList) -> ReturnType {
let mut cols: Vec<ScalarType> = match &self.expr {
HirRelationExpr::CallTable { func, .. } => func
.output_type()
.column_types
.into_iter()
.map(|col| col.scalar_type)
.collect(),
other => unreachable!(
"unexepected HirRelationExpr in Operation<TableFuncPlan>::return_type: {:?}",
other
),
};
match cols.len() {
0 => ReturnType {
typ: None,
is_set_of: true,
},
1 => ReturnType::set_of(cols.remove(0).into()),
            // Returned relation types with > 1 column are treated as records,
            // irrespective of the return type we currently assess, e.g.:
// ```sql
// SELECT jsonb_each('{"a": 1}');
// jsonb_each
// ------------
// (a,1)
//
// SELECT pg_typeof(jsonb_each('{"a": 1}'));
// pg_typeof
// -----------
// record
// ```
_ => ReturnType::set_of(ParamType::RecordAny),
}
}
}
#[derive(Clone, Debug)]
/// Tracks candidate implementations.
pub struct Candidate<'a, R> {
/// The implementation under consideration.
fimpl: &'a FuncImpl<R>,
exact_matches: usize,
preferred_types: usize,
}
/// Selects the best implementation given the provided `args` using a
/// process similar to [PostgreSQL's parser][pgparser], and returns the
/// `ScalarExpr` to invoke that function.
///
/// Inline comments prefixed with a number are taken from the "Function Type
/// Resolution" section of the linked page.
///
/// # Errors
/// - When the provided arguments are not valid for any implementation, e.g.
/// cannot be converted to the appropriate types.
/// - When all implementations are equally valid.
///
/// [pgparser]: https://www.postgresql.org/docs/current/typeconv-oper.html
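///
/// For example (a hedged sketch): given the `abs` implementations defined
/// below, a call with a known `int4` argument selects the `params!(Int32)`
/// implementation as an exact match, while a call with an unknown-type
/// literal falls through to the category-based rules (step 4.e) to pick
/// among the numeric overloads.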
pub fn select_impl<R>(
ecx: &ExprContext,
spec: FuncSpec,
impls: &[FuncImpl<R>],
args: Vec<CoercibleScalarExpr>,
order_by: Vec<ColumnOrder>,
) -> Result<R, PlanError>
where
R: fmt::Debug,
{
let name = spec.to_string();
let ecx = &ecx.with_name(&name);
let types: Vec<_> = args.iter().map(|e| ecx.scalar_type(e)).collect();
select_impl_inner(ecx, impls, args, &types, order_by).map_err(|e| {
let types: Vec<_> = types
.into_iter()
.map(|ty| match ty {
Some(ty) => ecx.humanize_scalar_type(&ty),
None => "unknown".to_string(),
})
.collect();
let context = match (spec, types.as_slice()) {
(FuncSpec::Func(name), _) => {
format!("Cannot call function {}({})", name, types.join(", "))
}
(FuncSpec::Op(name), [typ]) => format!("no overload for {} {}", name, typ),
(FuncSpec::Op(name), [ltyp, rtyp]) => {
format!("no overload for {} {} {}", ltyp, name, rtyp)
}
(FuncSpec::Op(_), [..]) => unreachable!("non-unary non-binary operator"),
};
PlanError::Unstructured(format!("{}: {}", context, e))
})
}
fn select_impl_inner<R>(
ecx: &ExprContext,
impls: &[FuncImpl<R>],
cexprs: Vec<CoercibleScalarExpr>,
types: &[Option<ScalarType>],
order_by: Vec<ColumnOrder>,
) -> Result<R, PlanError>
where
R: fmt::Debug,
{
// 4.a. Discard candidate functions for which the input types do not
// match and cannot be converted (using an implicit conversion) to
// match. unknown literals are assumed to be convertible to anything for
// this purpose.
let impls: Vec<_> = impls
.iter()
.filter(|i| i.params.matches_argtypes(ecx, types))
.collect();
let f = find_match(ecx, types, impls)?;
(f.op.0)(ecx, cexprs, &f.params, order_by)
}
/// Finds an exact match based on the arguments, or, if no exact match, finds
/// the best match available. Patterned after [PostgreSQL's type conversion
/// matching algorithm][pgparser].
///
/// [pgparser]: https://www.postgresql.org/docs/current/typeconv-func.html
fn find_match<'a, R: std::fmt::Debug>(
ecx: &ExprContext,
types: &[Option<ScalarType>],
impls: Vec<&'a FuncImpl<R>>,
) -> Result<&'a FuncImpl<R>, PlanError> {
let all_types_known = types.iter().all(|t| t.is_some());
// Check for exact match.
if all_types_known {
let known_types: Vec<_> = types.iter().filter_map(|t| t.as_ref()).collect();
let matching_impls: Vec<&FuncImpl<_>> = impls
.iter()
.filter(|i| i.params.exact_match(&known_types))
.cloned()
.collect();
if matching_impls.len() == 1 {
return Ok(&matching_impls[0]);
}
}
// No exact match. Apply PostgreSQL's best match algorithm. Generate
// candidates by assessing their compatibility with each implementation's
// parameters.
let mut candidates: Vec<Candidate<_>> = Vec::new();
macro_rules! maybe_get_last_candidate {
() => {
if candidates.len() == 1 {
return Ok(&candidates[0].fimpl);
}
};
}
let mut max_exact_matches = 0;
for fimpl in impls {
let mut exact_matches = 0;
let mut preferred_types = 0;
for (i, arg_type) in types.iter().enumerate() {
let param_type = &fimpl.params[i];
match arg_type {
Some(arg_type) => {
if param_type == arg_type {
exact_matches += 1;
}
if param_type.is_preferred_by(arg_type) {
preferred_types += 1;
}
}
None => {
if param_type.prefers_self() {
preferred_types += 1;
}
}
}
}
max_exact_matches = std::cmp::max(max_exact_matches, exact_matches);
candidates.push(Candidate {
fimpl,
exact_matches,
preferred_types,
});
}
if candidates.is_empty() {
sql_bail!(
"arguments cannot be implicitly cast to any implementation's parameters; \
try providing explicit casts"
)
}
maybe_get_last_candidate!();
// 4.c. Run through all candidates and keep those with the most exact
// matches on input types. Keep all candidates if none have exact matches.
candidates.retain(|c| c.exact_matches >= max_exact_matches);
maybe_get_last_candidate!();
// 4.d. Run through all candidates and keep those that accept preferred
// types (of the input data type's type category) at the most positions
// where type conversion will be required.
let mut max_preferred_types = 0;
for c in &candidates {
max_preferred_types = std::cmp::max(max_preferred_types, c.preferred_types);
}
candidates.retain(|c| c.preferred_types >= max_preferred_types);
maybe_get_last_candidate!();
if all_types_known {
sql_bail!(
"unable to determine which implementation to use; try providing \
explicit casts to match parameter types"
)
}
let mut found_known = false;
let mut types_match = true;
let mut common_type: Option<ScalarType> = None;
for (i, arg_type) in types.iter().enumerate() {
let mut selected_category: Option<TypeCategory> = None;
let mut categories_match = true;
match arg_type {
// 4.e. If any input arguments are unknown, check the type
// categories accepted at those argument positions by the remaining
// candidates.
None => {
for c in candidates.iter() {
let this_category = TypeCategory::from_param(&c.fimpl.params[i]);
// 4.e. cont: Select the string category if any candidate
// accepts that category. (This bias towards string is
// appropriate since an unknown-type literal looks like a
// string.)
if this_category == TypeCategory::String {
selected_category = Some(TypeCategory::String);
break;
}
match selected_category {
Some(ref mut selected_category) => {
// 4.e. cont: [...otherwise,] if all the remaining candidates
// accept the same type category, select that category.
categories_match =
selected_category == &this_category && categories_match;
}
None => selected_category = Some(this_category.clone()),
}
}
// 4.e. cont: Otherwise fail because the correct choice cannot
// be deduced without more clues.
                // (ed: this doesn't mean fail entirely, simply moving on to 4.f)
if selected_category != Some(TypeCategory::String) && !categories_match {
break;
}
// 4.e. cont: Now discard candidates that do not accept the
// selected type category. Furthermore, if any candidate accepts
// a preferred type in that category, discard candidates that
// accept non-preferred types for that argument.
let selected_category = selected_category.unwrap();
let preferred_type = selected_category.preferred_type();
let mut found_preferred_type_candidate = false;
candidates.retain(|c| {
if let Some(typ) = &preferred_type {
found_preferred_type_candidate = c.fimpl.params[i].accepts_type(ecx, typ)
|| found_preferred_type_candidate;
}
selected_category == TypeCategory::from_param(&c.fimpl.params[i])
});
if found_preferred_type_candidate {
let preferred_type = preferred_type.unwrap();
candidates.retain(|c| c.fimpl.params[i].accepts_type(ecx, &preferred_type));
}
}
Some(typ) => {
found_known = true;
// Track if all known types are of the same type; use this info
// in 4.f.
match common_type {
Some(ref common_type) => types_match = common_type == typ && types_match,
None => common_type = Some(typ.clone()),
}
}
}
}
maybe_get_last_candidate!();
// 4.f. If there are both unknown and known-type arguments, and all the
// known-type arguments have the same type, assume that the unknown
// arguments are also of that type, and check which candidates can accept
// that type at the unknown-argument positions.
    // (ed: We know an unknown argument exists if we're in this part of the code.)
if found_known && types_match {
let common_type = common_type.unwrap();
let common_typed: Vec<_> = types
.iter()
.map(|t| match t {
Some(t) => Some(t.clone()),
None => Some(common_type.clone()),
})
.collect();
candidates.retain(|c| c.fimpl.params.matches_argtypes(ecx, &common_typed));
maybe_get_last_candidate!();
}
sql_bail!(
"unable to determine which implementation to use; try providing \
explicit casts to match parameter types"
)
}
/// Coerces concrete arguments for a function according to the abstract
/// parameters specified in the function definition.
///
/// You must only call this function if `ParamList::matches_argtypes` has
/// verified that the `args` are valid for `params`.
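///
/// A hedged usage sketch (mirroring how `Operation::variadic` invokes this
/// function; `ecx` and `arg` are assumed to be in scope):
///
/// ```ignore
/// let params = ParamList::Exact(vec![ParamType::Plain(ScalarType::String)]);
/// // `arg` may be an unknown-type literal; it is coerced to `text` here.
/// let exprs = coerce_args_to_types(ecx, vec![arg], &params)?;
/// ```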
fn coerce_args_to_types(
ecx: &ExprContext,
args: Vec<CoercibleScalarExpr>,
params: &ParamList,
) -> Result<Vec<HirScalarExpr>, PlanError> {
let types: Vec<_> = args.iter().map(|e| ecx.scalar_type(e)).collect();
let get_constrained_ty = || {
params
.resolve_polymorphic_types(&types)
.expect("function selection verifies that polymorphic types successfully resolved")
};
let do_convert =
|arg: CoercibleScalarExpr, ty: &ScalarType| arg.cast_to(ecx, CastContext::Implicit, ty);
let mut exprs = Vec::new();
for (i, arg) in args.into_iter().enumerate() {
let expr = match ¶ms[i] {
// Concrete type. Direct conversion.
ParamType::Plain(ty) => do_convert(arg, ty)?,
// Polymorphic pseudotypes. Convert based on constrained type.
ParamType::ArrayAny | ParamType::ListAny | ParamType::MapAny => {
do_convert(arg, &get_constrained_ty())?
}
ParamType::ArrayElementAny => {
let constrained_array = get_constrained_ty();
do_convert(arg, &constrained_array.unwrap_array_element_type())?
}
ParamType::ListElementAny => {
let constrained_list = get_constrained_ty();
do_convert(arg, &constrained_list.unwrap_list_element_type())?
}
ParamType::NonVecAny => {
let ty = get_constrained_ty();
assert!(!ty.is_vec());
do_convert(arg, &ty)?
}
            // Special "any" pseudotype. Per PostgreSQL, uncoerced literals
// are accepted, but uncoerced parameters are rejected.
ParamType::Any => match arg {
CoercibleScalarExpr::Parameter(n) => {
sql_bail!("could not determine data type of parameter ${}", n)
}
_ => arg.type_as_any(ecx)?,
},
ParamType::RecordAny => unreachable!("records not yet supported for input"),
};
exprs.push(expr);
}
Ok(exprs)
}
/// Provides shorthand for constructing a `ParamList` from items convertible
/// into `ParamType`s.
macro_rules! params {
($p:ident...) => { ParamList::Variadic($p.into()) };
($($p:expr),*) => { ParamList::Exact(vec![$($p.into(),)*]) };
}
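// For example (a sketch of the expansions):
//
//     params!(String, Int32)  // => ParamList::Exact(vec![String.into(), Int32.into()])
//     params!(Any...)         // => ParamList::Variadic(Any.into())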
macro_rules! impl_def {
// Return type explicitly specified. This must be the case in situations
// such as:
// - Polymorphic functions: We have no way of understanding if the input
// type affects the return type, so you must tell us what the return type
// is.
// - Explicitly defined Operations whose returned expression does not
// appropriately correlate to the function itself, e.g. returning a
// UnaryFunc from a FuncImpl that takes two parameters.
// - Unimplemented/catalog-only functions
($params:expr, $op:expr, $return_type:expr, $oid:expr) => {{
FuncImpl {
oid: $oid,
params: $params.into(),
op: $op.into(),
return_type: $return_type.into(),
}
}};
// Return type can be automatically determined as a function of the
// parameters.
($params:expr, $op:expr, $oid:expr) => {{
let pcx = crate::plan::PlanContext::new(chrono::MIN_DATETIME, false);
let scx = StatementContext::new(None, &crate::catalog::DummyCatalog);
        // The `OneShot` lifetime is compatible with the most functions.
let qcx = QueryContext::root(&scx, QueryLifetime::OneShot(&pcx));
let ecx = ExprContext {
qcx: &qcx,
name: "dummy for builtin func return type eval",
scope: &Scope::empty(),
relation_type: &RelationType::empty(),
allow_aggregates: true,
allow_subqueries: false,
allow_windows: true,
};
let op = Operation::from($op);
let params = ParamList::from($params);
assert!(
!params.has_polymorphic(),
"loading builtin functions failed: polymorphic functions must have return types explicitly defined"
);
let cexprs = params.contrive_coercible_exprs();
let r = (op.0)(&ecx, cexprs, ¶ms, vec![]).unwrap();
let return_type = r.return_type(&ecx, ¶ms);
FuncImpl {
oid: $oid,
params,
op,
return_type,
}
}};
}
/// Constructs builtin function map.
macro_rules! builtins {
{
$(
$name:expr => $ty:ident {
$($params:expr => $op:expr $(=> $return_type:expr)?, $oid:expr;)+
}
),+
} => {{
let mut builtins = HashMap::new();
$(
let impls = vec![$(impl_def!($params, $op $(,$return_type)?, $oid)),+];
let old = builtins.insert($name, Func::$ty(impls));
assert!(old.is_none(), "duplicate entry in builtins list");
)+
builtins
}};
}
#[derive(Debug)]
pub struct TableFuncPlan {
pub expr: HirRelationExpr,
pub column_names: Vec<ColumnName>,
}
#[derive(Debug)]
pub enum Func {
Scalar(Vec<FuncImpl<HirScalarExpr>>),
Aggregate(Vec<FuncImpl<(HirScalarExpr, AggregateFunc)>>),
Table(Vec<FuncImpl<TableFuncPlan>>),
ScalarWindow(Vec<FuncImpl<ScalarWindowFunc>>),
}
impl Func {
pub fn func_impls(&self) -> Vec<FuncImplCatalogDetails> {
match self {
Func::Scalar(impls) => impls.iter().map(|f| f.details()).collect::<Vec<_>>(),
Func::Aggregate(impls) => impls.iter().map(|f| f.details()).collect::<Vec<_>>(),
Func::Table(impls) => impls.iter().map(|f| f.details()).collect::<Vec<_>>(),
Func::ScalarWindow(impls) => impls.iter().map(|f| f.details()).collect::<Vec<_>>(),
}
}
}
/// Functions using this macro should be transformed/planned away before
/// reaching function selection code, but still need to be present in the
/// catalog during planning.
macro_rules! catalog_name_only {
($name:expr) => {
panic!(
"{} should be planned away before reaching function selection",
$name
)
};
}
lazy_static! {
/// Correlates a built-in function name to its implementations.
pub static ref PG_CATALOG_BUILTINS: HashMap<&'static str, Func> = {
use ParamType::*;
use ScalarBaseType::*;
builtins! {
// Literal OIDs collected from PG 13 using a version of this query
// ```sql
// SELECT oid, proname, proargtypes::regtype[]
// FROM pg_proc
// WHERE proname IN (
// 'ascii', 'array_upper', 'jsonb_build_object'
// );
// ```
// Values are also available through
// https://github.com/postgres/postgres/blob/master/src/include/catalog/pg_proc.dat
// Scalars.
"abs" => Scalar {
params!(Int16) => UnaryFunc::AbsInt16(func::AbsInt16), 1398;
params!(Int32) => UnaryFunc::AbsInt32(func::AbsInt32), 1397;
params!(Int64) => UnaryFunc::AbsInt64(func::AbsInt64), 1396;
params!(Numeric) => UnaryFunc::AbsNumeric(func::AbsNumeric), 1705;
params!(Float32) => UnaryFunc::AbsFloat32(func::AbsFloat32), 1394;
params!(Float64) => UnaryFunc::AbsFloat64(func::AbsFloat64), 1395;
},
"array_cat" => Scalar {
params!(ArrayAny, ArrayAny) => Operation::binary(|ecx, lhs, rhs| {
ecx.require_experimental_mode("array_cat")?;
Ok(lhs.call_binary(rhs, BinaryFunc::ArrayArrayConcat))
}) => ArrayAny, 383;
},
"array_in" => Scalar {
params!(String, Oid, Int32) =>
Operation::unary(|_ecx, _e| bail_unsupported!("array_in")) => ArrayAny, 750;
},
"array_length" => Scalar {
            params!(ArrayAny, Int64) => BinaryFunc::ArrayLength => Int32, 2176;
},
"array_lower" => Scalar {
params!(ArrayAny, Int64) => BinaryFunc::ArrayLower => Int32, 2091;
},
"array_remove" => Scalar {
params!(ArrayAny, ArrayElementAny) => BinaryFunc::ArrayRemove => ArrayAny, 3167;
},
"array_to_string" => Scalar {
params!(ArrayAny, String) => Operation::variadic(array_to_string) => String, 395;
params!(ArrayAny, String, String) => Operation::variadic(array_to_string) => String, 384;
},
"array_upper" => Scalar {
params!(ArrayAny, Int64) => BinaryFunc::ArrayUpper => Int32, 2092;
},
"ascii" => Scalar {
params!(String) => UnaryFunc::Ascii, 1620;
},
"avg" => Scalar {
params!(Int64) => Operation::nullary(|_ecx| catalog_name_only!("avg")) => Numeric, 2100;
params!(Int32) => Operation::nullary(|_ecx| catalog_name_only!("avg")) => Numeric, 2101;
params!(Int16) => Operation::nullary(|_ecx| catalog_name_only!("avg")) => Numeric, 2102;
params!(Float32) => Operation::nullary(|_ecx| catalog_name_only!("avg")) => Float64, 2104;
params!(Float64) => Operation::nullary(|_ecx| catalog_name_only!("avg")) => Float64, 2105;
params!(Interval) => Operation::nullary(|_ecx| catalog_name_only!("avg")) => Interval, 2106;
},
"bit_length" => Scalar {
params!(Bytes) => UnaryFunc::BitLengthBytes, 1810;
params!(String) => UnaryFunc::BitLengthString, 1811;
},
"btrim" => Scalar {
params!(String) => UnaryFunc::TrimWhitespace, 885;
params!(String, String) => BinaryFunc::Trim, 884;
},
"cbrt" => Scalar {
params!(Float64) => UnaryFunc::CbrtFloat64(func::CbrtFloat64), 1345;
},
"ceil" => Scalar {
params!(Float32) => UnaryFunc::CeilFloat32(func::CeilFloat32), oid::FUNC_CEIL_F32_OID;
params!(Float64) => UnaryFunc::CeilFloat64(func::CeilFloat64), 2308;
params!(Numeric) => UnaryFunc::CeilNumeric(func::CeilNumeric), 1711;
},
"char_length" => Scalar {
params!(String) => UnaryFunc::CharLength, 1381;
},
"concat" => Scalar {
params!(Any...) => Operation::variadic(|ecx, cexprs| {
if cexprs.is_empty() {
sql_bail!("No function matches the given name and argument types. \
You might need to add explicit type casts.")
}
let mut exprs = vec![];
for expr in cexprs {
exprs.push(match ecx.scalar_type(&expr) {
// concat uses nonstandard bool -> string casts
// to match historical baggage in PostgreSQL.
ScalarType::Bool => expr.call_unary(UnaryFunc::CastBoolToStringNonstandard(func::CastBoolToStringNonstandard)),
// TODO(#7572): remove call to PadChar
ScalarType::Char { length } => expr.call_unary(UnaryFunc::PadChar(func::PadChar { length })),
_ => typeconv::to_string(ecx, expr)
});
}
Ok(HirScalarExpr::CallVariadic { func: VariadicFunc::Concat, exprs })
}) => String, 3058;
},
"convert_from" => Scalar {
params!(Bytes, String) => BinaryFunc::ConvertFrom, 1714;
},
"cos" => Scalar {
params!(Float64) => UnaryFunc::Cos(func::Cos), 1605;
},
"acos" => Scalar {
params!(Float64) => UnaryFunc::Acos(func::Acos), 1601;
},
"cosh" => Scalar {
params!(Float64) => UnaryFunc::Cosh(func::Cosh), 2463;
},
"acosh" => Scalar {
params!(Float64) => UnaryFunc::Acosh(func::Acosh), 2466;
},
"cot" => Scalar {
params!(Float64) => UnaryFunc::Cot(func::Cot), 1607;
},
"current_schema" => Scalar {
// TODO: this should be name
params!() => sql_impl_func("current_schemas(false)[1]") => String, 1402;
},
"current_schemas" => Scalar {
params!(Bool) => Operation::unary(|ecx, e| {
let with_sys = HirScalarExpr::literal_1d_array(
ecx.qcx.scx.catalog.search_path(true).iter().map(|s| Datum::String(s)).collect(),
ScalarType::String)?;
let without_sys = HirScalarExpr::literal_1d_array(
ecx.qcx.scx.catalog.search_path(false).iter().map(|s| Datum::String(s)).collect(),
ScalarType::String)?;
Ok(HirScalarExpr::If {
cond: Box::new(e),
then: Box::new(with_sys),
els: Box::new(without_sys),
})
// TODO: this should be name[]
}) => ScalarType::Array(Box::new(ScalarType::String)), 1403;
},
"current_database" => Scalar {
params!() => Operation::nullary(|ecx| {
let datum = Datum::String(ecx.qcx.scx.catalog.default_database());
Ok(HirScalarExpr::literal(datum, ScalarType::String))
}), 861;
},
"current_user" => Scalar {
params!() => Operation::nullary(|ecx| {
let datum = Datum::String(ecx.qcx.scx.catalog.user());
Ok(HirScalarExpr::literal(datum, ScalarType::String))
}), 745;
},
"session_user" => Scalar {
params!() => Operation::nullary(|ecx| {
let datum = Datum::String(ecx.qcx.scx.catalog.user());
Ok(HirScalarExpr::literal(datum, ScalarType::String))
}), 746;
},
"date_bin" => Scalar {
params!(Interval, Timestamp) => Operation::binary(|ecx, stride, source| {
ecx.require_experimental_mode("binary date_bin")?;
Ok(stride.call_binary(source, BinaryFunc::DateBinTimestamp))
}), oid::FUNC_MZ_DATE_BIN_UNIX_EPOCH_TS_OID;
params!(Interval, TimestampTz) => Operation::binary(|ecx, stride, source| {
ecx.require_experimental_mode("binary date_bin")?;
Ok(stride.call_binary(source, BinaryFunc::DateBinTimestampTz))
}), oid::FUNC_MZ_DATE_BIN_UNIX_EPOCH_TSTZ_OID;
params!(Interval, Timestamp, Timestamp) => VariadicFunc::DateBinTimestamp, 6177;
params!(Interval, TimestampTz, TimestampTz) => VariadicFunc::DateBinTimestampTz, 6178;
},
"extract" => Scalar {
params!(String, Interval) => BinaryFunc::ExtractInterval, 6204;
params!(String, Time) => BinaryFunc::ExtractTime, 6200;
params!(String, Timestamp) => BinaryFunc::ExtractTimestamp, 6202;
params!(String, TimestampTz) => BinaryFunc::ExtractTimestampTz, 6203;
params!(String, Date) => BinaryFunc::ExtractDate, 6199;
},
"date_part" => Scalar {
params!(String, Interval) => BinaryFunc::DatePartInterval, 1172;
params!(String, Time) => BinaryFunc::DatePartTime, 1385;
params!(String, Timestamp) => BinaryFunc::DatePartTimestamp, 2021;
params!(String, TimestampTz) => BinaryFunc::DatePartTimestampTz, 1171;
},
"date_trunc" => Scalar {
params!(String, Timestamp) => BinaryFunc::DateTruncTimestamp, 2020;
params!(String, TimestampTz) => BinaryFunc::DateTruncTimestampTz, 1217;
},
"degrees" => Scalar {
params!(Float64) => UnaryFunc::Degrees(func::Degrees), 1608;
},
"digest" => Scalar {
params!(String, String) => BinaryFunc::DigestString, 44154;
params!(Bytes, String) => BinaryFunc::DigestBytes, 44155;
},
"exp" => Scalar {
params!(Float64) => UnaryFunc::Exp(func::Exp), 1347;
params!(Numeric) => UnaryFunc::ExpNumeric(func::ExpNumeric), 1732;
},
"floor" => Scalar {
params!(Float32) => UnaryFunc::FloorFloat32(func::FloorFloat32), oid::FUNC_FLOOR_F32_OID;
params!(Float64) => UnaryFunc::FloorFloat64(func::FloorFloat64), 2309;
params!(Numeric) => UnaryFunc::FloorNumeric(func::FloorNumeric), 1712;
},
"format_type" => Scalar {
params!(Oid, Int32) => sql_impl_func(
"CASE
WHEN $1 IS NULL THEN NULL
ELSE coalesce((SELECT concat(name, mz_internal.mz_render_typemod($1, $2)) FROM mz_catalog.mz_types WHERE oid = $1), '???')
END"
) => String, 1081;
},
"hmac" => Scalar {
params!(String, String, String) => VariadicFunc::HmacString, 44156;
params!(Bytes, Bytes, String) => VariadicFunc::HmacBytes, 44157;
},
"jsonb_array_length" => Scalar {
params!(Jsonb) => UnaryFunc::JsonbArrayLength, 3207;
},
"jsonb_build_array" => Scalar {
params!() => VariadicFunc::JsonbBuildArray, 3272;
params!(Any...) => Operation::variadic(|ecx, exprs| Ok(HirScalarExpr::CallVariadic {
func: VariadicFunc::JsonbBuildArray,
exprs: exprs.into_iter().map(|e| typeconv::to_jsonb(ecx, e)).collect(),
})) => Jsonb, 3271;
},
"jsonb_build_object" => Scalar {
params!() => VariadicFunc::JsonbBuildObject, 3274;
params!(Any...) => Operation::variadic(|ecx, exprs| {
if exprs.len() % 2 != 0 {
sql_bail!("argument list must have even number of elements")
}
Ok(HirScalarExpr::CallVariadic {
func: VariadicFunc::JsonbBuildObject,
exprs: exprs.into_iter().tuples().map(|(key, val)| {
let key = typeconv::to_string(ecx, key);
let val = typeconv::to_jsonb(ecx, val);
vec![key, val]
}).flatten().collect(),
})
}) => Jsonb, 3273;
},
"jsonb_pretty" => Scalar {
params!(Jsonb) => UnaryFunc::JsonbPretty, 3306;
},
"jsonb_strip_nulls" => Scalar {
params!(Jsonb) => UnaryFunc::JsonbStripNulls, 3262;
},
"jsonb_typeof" => Scalar {
params!(Jsonb) => UnaryFunc::JsonbTypeof, 3210;
},
"left" => Scalar {
params!(String, Int32) => BinaryFunc::Left, 3060;
},
"length" => Scalar {
params!(Bytes) => UnaryFunc::ByteLengthBytes, 2010;
// bpcharlen is redundant with automatic coercion to string, 1318.
params!(String) => UnaryFunc::CharLength, 1317;
params!(Bytes, String) => BinaryFunc::EncodedBytesCharLength, 1713;
},
"ln" => Scalar {
params!(Float64) => UnaryFunc::Ln(func::Ln), 1341;
params!(Numeric) => UnaryFunc::LnNumeric(func::LnNumeric), 1734;
},
"log10" => Scalar {
params!(Float64) => UnaryFunc::Log10(func::Log10), 1194;
params!(Numeric) => UnaryFunc::Log10Numeric(func::Log10Numeric), 1481;
},
"log" => Scalar {
params!(Float64) => UnaryFunc::Log10(func::Log10), 1340;
params!(Numeric) => UnaryFunc::Log10Numeric(func::Log10Numeric), 1741;
params!(Numeric, Numeric) => BinaryFunc::LogNumeric, 1736;
},
"lower" => Scalar {
params!(String) => UnaryFunc::Lower, 870;
},
"lpad" => Scalar {
params!(String, Int64) => VariadicFunc::PadLeading, 879;
params!(String, Int64, String) => VariadicFunc::PadLeading, 873;
},
"ltrim" => Scalar {
params!(String) => UnaryFunc::TrimLeadingWhitespace, 881;
params!(String, String) => BinaryFunc::TrimLeading, 875;
},
"make_timestamp" => Scalar {
params!(Int64, Int64, Int64, Int64, Int64, Float64) => VariadicFunc::MakeTimestamp, 3461;
},
"md5" => Scalar {
params!(String) => Operation::unary(move |_ecx, input| {
let algorithm = HirScalarExpr::literal(Datum::String("md5"), ScalarType::String);
let encoding = HirScalarExpr::literal(Datum::String("hex"), ScalarType::String);
Ok(input.call_binary(algorithm, BinaryFunc::DigestString).call_binary(encoding, BinaryFunc::Encode))
}) => String, 2311;
params!(Bytes) => Operation::unary(move |_ecx, input| {
let algorithm = HirScalarExpr::literal(Datum::String("md5"), ScalarType::String);
let encoding = HirScalarExpr::literal(Datum::String("hex"), ScalarType::String);
Ok(input.call_binary(algorithm, BinaryFunc::DigestBytes).call_binary(encoding, BinaryFunc::Encode))
}) => String, 2321;
},
"mod" => Scalar {
params!(Numeric, Numeric) => Operation::nullary(|_ecx| catalog_name_only!("mod")) => Numeric, 1728;
params!(Int16, Int16) => Operation::nullary(|_ecx| catalog_name_only!("mod")) => Int16, 940;
params!(Int32, Int32) => Operation::nullary(|_ecx| catalog_name_only!("mod")) => Int32, 941;
params!(Int64, Int64) => Operation::nullary(|_ecx| catalog_name_only!("mod")) => Int64, 947;
},
"now" => Scalar {
params!() => Operation::nullary(|ecx| plan_current_timestamp(ecx, "now")), 1299;
},
"octet_length" => Scalar {
params!(Bytes) => UnaryFunc::ByteLengthBytes, 720;
params!(String) => UnaryFunc::ByteLengthString, 1374;
params!(Char) => Operation::unary(|ecx, e| {
let length = ecx.scalar_type(&e).unwrap_char_varchar_length();
Ok(e.call_unary(UnaryFunc::PadChar(func::PadChar { length }))
.call_unary(UnaryFunc::ByteLengthString)
)
}), 1375;
},
"obj_description" => Scalar {
params!(Oid, String) => Operation::binary(|_ecx, _oid, _catalog| {
// This function is meant to return the comment on a
// database object, but we don't presently support comments,
                // so it is stubbed out to always return NULL.
Ok(HirScalarExpr::literal_null(ScalarType::String))
}), 1215;
},
"pg_column_size" => Scalar {
params!(Any) => UnaryFunc::PgColumnSize(func::PgColumnSize) => Int32, 1269;
},
"mz_row_size" => Scalar {
params!(Any) => Operation::unary(|ecx, e| {
let s = ecx.scalar_type(&e);
if !matches!(s, ScalarType::Record{..}) {
sql_bail!("mz_row_size requires a record type");
}
Ok(e.call_unary(UnaryFunc::MzRowSize(func::MzRowSize)))
}) => Int32, oid::FUNC_MZ_ROW_SIZE;
},
"pg_encoding_to_char" => Scalar {
            // Materialize only supports UTF8-encoded databases. Return 'UTF8'
            // if Postgres's encoding ID for UTF8 (6) is provided; otherwise
            // return NULL.
params!(Int64) => sql_impl_func("CASE WHEN $1 = 6 THEN 'UTF8' ELSE NULL END") => String, 1597;
},
"pg_backend_pid" => Scalar {
params!() => Operation::nullary(|_ecx| {
Ok(HirScalarExpr::literal(
Datum::from(-1),
ScalarType::Int32,
))
}), 2026;
},
        // pg_get_constraintdef gives more info about a constraint within the
        // `pg_constraint` view. It currently returns no information, as the
        // `pg_constraint` view is empty in Materialize.
"pg_get_constraintdef" => Scalar {
params!(Oid) => UnaryFunc::PgGetConstraintdef(func::PgGetConstraintdef), 1387;
params!(Oid, Bool) => BinaryFunc::PgGetConstraintdef, 2508;
},
// pg_get_expr is meant to convert the textual version of
// pg_node_tree data into parseable expressions. However, we don't
// use the pg_get_expr structure anywhere and the equivalent columns
// in Materialize (e.g. index expressions) are already stored as
// parseable expressions. So, we offer this function in the catalog
// for ORM support, but make no effort to provide its semantics,
// e.g. this also means we drop the Oid argument on the floor.
"pg_get_expr" => Scalar {
params!(String, Oid) => Operation::binary(|_ecx, l, _r| Ok(l)), 1716;
params!(String, Oid, Bool) => Operation::variadic(move |_ecx, mut args| Ok(args.remove(0))), 2509;
},
"pg_get_userbyid" => Scalar {
params!(Oid) => sql_impl_func("'unknown (OID=' || $1 || ')'") => String, 1642;
},
"pg_postmaster_start_time" => Scalar {
params!() => Operation::nullary(pg_postmaster_start_time), 2560;
},
"pg_table_is_visible" => Scalar {
params!(Oid) => sql_impl_func(
"(SELECT s.name = ANY(current_schemas(true))
FROM mz_catalog.mz_objects o JOIN mz_catalog.mz_schemas s ON o.schema_id = s.id
WHERE o.oid = $1)"
) => Bool, 2079;
},
"pg_type_is_visible" => Scalar {
params!(Oid) => sql_impl_func(
"(SELECT s.name = ANY(current_schemas(true))
FROM mz_catalog.mz_types t JOIN mz_catalog.mz_schemas s ON t.schema_id = s.id
WHERE t.oid = $1)"
) => Bool, 2080;
},
"pg_typeof" => Scalar {
params!(Any) => Operation::new(|ecx, exprs, params, _order_by| {
// pg_typeof reports the type *before* coercion.
let name = match ecx.scalar_type(&exprs[0]) {
None => "unknown".to_string(),
Some(ty) => ecx.humanize_scalar_type(&ty),
};
// For consistency with other functions, verify that
// coercion is possible, though we don't actually care about
// the coerced results.
coerce_args_to_types(ecx, exprs, params)?;
// TODO(benesch): make this function have return type
// regtype, when we support that type. Document the function
// at that point. For now, it's useful enough to have this
// halfway version that returns a string.
Ok(HirScalarExpr::literal(Datum::String(&name), ScalarType::String))
}) => String, 1619;
},
"position" => Scalar {
params!(String, String) => BinaryFunc::Position, 849;
},
"pow" => Scalar {
params!(Float64, Float64) => Operation::nullary(|_ecx| catalog_name_only!("pow")) => Float64, 1346;
},
"power" => Scalar {
params!(Float64, Float64) => BinaryFunc::Power, 1368;
params!(Numeric, Numeric) => BinaryFunc::PowerNumeric, 2169;
},
"radians" => Scalar {
params!(Float64) => UnaryFunc::Radians(func::Radians), 1609;
},
"repeat" => Scalar {
params!(String, Int32) => BinaryFunc::RepeatString, 1622;
},
"regexp_match" => Scalar {
params!(String, String) => VariadicFunc::RegexpMatch => ScalarType::Array(Box::new(ScalarType::String)), 3396;
params!(String, String, String) => VariadicFunc::RegexpMatch => ScalarType::Array(Box::new(ScalarType::String)), 3397;
},
"replace" => Scalar {
params!(String, String, String) => VariadicFunc::Replace, 2087;
},
"right" => Scalar {
params!(String, Int32) => BinaryFunc::Right, 3061;
},
"round" => Scalar {
params!(Float32) => UnaryFunc::RoundFloat32(func::RoundFloat32), oid::FUNC_ROUND_F32_OID;
params!(Float64) => UnaryFunc::RoundFloat64(func::RoundFloat64), 1342;
params!(Numeric) => UnaryFunc::RoundNumeric(func::RoundNumeric), 1708;
params!(Numeric, Int32) => BinaryFunc::RoundNumeric, 1707;
},
"rtrim" => Scalar {
params!(String) => UnaryFunc::TrimTrailingWhitespace, 882;
params!(String, String) => BinaryFunc::TrimTrailing, 876;
},
"sha224" => Scalar {
params!(Bytes) => digest("sha224") => Bytes, 3419;
},
"sha256" => Scalar {
params!(Bytes) => digest("sha256") => Bytes, 3420;
},
"sha384" => Scalar {
params!(Bytes) => digest("sha384") => Bytes, 3421;
},
"sha512" => Scalar {
params!(Bytes) => digest("sha512") => Bytes, 3422;
},
"sin" => Scalar {
params!(Float64) => UnaryFunc::Sin(func::Sin), 1604;
},
"asin" => Scalar {
params!(Float64) => UnaryFunc::Asin(func::Asin), 1600;
},
"sinh" => Scalar {
params!(Float64) => UnaryFunc::Sinh(func::Sinh), 2462;
},
"asinh" => Scalar {
params!(Float64) => UnaryFunc::Asinh(func::Asinh), 2465;
},
"split_part" => Scalar {
params!(String, String, Int64) => VariadicFunc::SplitPart, 2088;
},
"stddev" => Scalar {
params!(Float32) => Operation::nullary(|_ecx| catalog_name_only!("stddev")) => Float64, 2157;
params!(Float64) => Operation::nullary(|_ecx| catalog_name_only!("stddev")) => Float64, 2158;
params!(Int16) => Operation::nullary(|_ecx| catalog_name_only!("stddev")) => Numeric, 2156;
params!(Int32) => Operation::nullary(|_ecx| catalog_name_only!("stddev")) => Numeric, 2155;
params!(Int64) => Operation::nullary(|_ecx| catalog_name_only!("stddev")) => Numeric, 2154;
},
"stddev_pop" => Scalar {
params!(Float32) => Operation::nullary(|_ecx| catalog_name_only!("stddev_pop")) => Float64, 2727;
params!(Float64) => Operation::nullary(|_ecx| catalog_name_only!("stddev_pop")) => Float64, 2728;
params!(Int16) => Operation::nullary(|_ecx| catalog_name_only!("stddev_pop")) => Numeric, 2726;
params!(Int32) => Operation::nullary(|_ecx| catalog_name_only!("stddev_pop")) => Numeric, 2725;
params!(Int64) => Operation::nullary(|_ecx| catalog_name_only!("stddev_pop")) => Numeric , 2724;
},
"stddev_samp" => Scalar {
params!(Float32) => Operation::nullary(|_ecx| catalog_name_only!("stddev_samp")) => Float64, 2715;
params!(Float64) => Operation::nullary(|_ecx| catalog_name_only!("stddev_samp")) => Float64, 2716;
params!(Int16) => Operation::nullary(|_ecx| catalog_name_only!("stddev_samp")) => Numeric, 2714;
params!(Int32) => Operation::nullary(|_ecx| catalog_name_only!("stddev_samp")) => Numeric, 2713;
params!(Int64) => Operation::nullary(|_ecx| catalog_name_only!("stddev_samp")) => Numeric, 2712;
},
"substr" => Scalar {
params!(String, Int64) => VariadicFunc::Substr, 883;
params!(String, Int64, Int64) => VariadicFunc::Substr, 877;
},
"substring" => Scalar {
params!(String, Int64) => VariadicFunc::Substr, 937;
params!(String, Int64, Int64) => VariadicFunc::Substr, 936;
},
"sqrt" => Scalar {
params!(Float64) => UnaryFunc::SqrtFloat64(func::SqrtFloat64), 1344;
params!(Numeric) => UnaryFunc::SqrtNumeric(func::SqrtNumeric), 1730;
},
"tan" => Scalar {
params!(Float64) => UnaryFunc::Tan(func::Tan), 1606;
},
"atan" => Scalar {
params!(Float64) => UnaryFunc::Atan(func::Atan), 1602;
},
"tanh" => Scalar {
params!(Float64) => UnaryFunc::Tanh(func::Tanh), 2464;
},
"atanh" => Scalar {
params!(Float64) => UnaryFunc::Atanh(func::Atanh), 2467;
},
"timezone" => Scalar {
params!(String, Timestamp) => BinaryFunc::TimezoneTimestamp, 2069;
params!(String, TimestampTz) => BinaryFunc::TimezoneTimestampTz, 1159;
// PG defines this as `text timetz`
params!(String, Time) => Operation::binary(|ecx, lhs, rhs| {
match ecx.qcx.lifetime {
QueryLifetime::OneShot(pcx) => {
let wall_time = pcx.wall_time.naive_utc();
Ok(lhs.call_binary(rhs, BinaryFunc::TimezoneTime{wall_time}))
},
QueryLifetime::Static => sql_bail!("timezone cannot be used in static queries"),
}
}), 2037;
params!(Interval, Timestamp) => BinaryFunc::TimezoneIntervalTimestamp, 2070;
params!(Interval, TimestampTz) => BinaryFunc::TimezoneIntervalTimestampTz, 1026;
// PG defines this as `interval timetz`
params!(Interval, Time) => BinaryFunc::TimezoneIntervalTime, 2038;
},
"to_char" => Scalar {
params!(Timestamp, String) => BinaryFunc::ToCharTimestamp, 2049;
params!(TimestampTz, String) => BinaryFunc::ToCharTimestampTz, 1770;
},
// > Returns the value as json or jsonb. Arrays and composites
// > are converted (recursively) to arrays and objects;
// > otherwise, if there is a cast from the type to json, the
// > cast function will be used to perform the conversion;
// > otherwise, a scalar value is produced. For any scalar type
// > other than a number, a Boolean, or a null value, the text
// > representation will be used, in such a fashion that it is a
// > valid json or jsonb value.
//
// https://www.postgresql.org/docs/current/functions-json.html
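    //
    // A few illustrative calls (assumed results per the PostgreSQL semantics
    // quoted above; not taken from this source):
    //   to_jsonb(1)         --> 1
    //   to_jsonb('a'::text) --> "a"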
"to_jsonb" => Scalar {
params!(Any) => Operation::unary(|ecx, e| {
// TODO(#7572): remove this
let e = match ecx.scalar_type(&e) {
ScalarType::Char { length } => e.call_unary(UnaryFunc::PadChar(func::PadChar { length })),
_ => e,
};
Ok(typeconv::to_jsonb(ecx, e))
}) => Jsonb, 3787;
},
"to_timestamp" => Scalar {
params!(Float64) => UnaryFunc::ToTimestamp(func::ToTimestamp), 1158;
},
"upper" => Scalar {
params!(String) => UnaryFunc::Upper, 871;
},
"variance" => Scalar {
params!(Float32) => Operation::nullary(|_ecx| catalog_name_only!("variance")) => Float64, 2151;
params!(Float64) => Operation::nullary(|_ecx| catalog_name_only!("variance")) => Float64, 2152;
params!(Int16) => Operation::nullary(|_ecx| catalog_name_only!("variance")) => Numeric, 2150;
params!(Int32) => Operation::nullary(|_ecx| catalog_name_only!("variance")) => Numeric, 2149;
params!(Int64) => Operation::nullary(|_ecx| catalog_name_only!("variance")) => Numeric, 2148;
},
"var_pop" => Scalar {
params!(Float32) => Operation::nullary(|_ecx| catalog_name_only!("var_pop")) => Float64, 2721;
params!(Float64) => Operation::nullary(|_ecx| catalog_name_only!("var_pop")) => Float64, 2722;
params!(Int16) => Operation::nullary(|_ecx| catalog_name_only!("var_pop")) => Numeric, 2720;
params!(Int32) => Operation::nullary(|_ecx| catalog_name_only!("var_pop")) => Numeric, 2719;
params!(Int64) => Operation::nullary(|_ecx| catalog_name_only!("var_pop")) => Numeric, 2718;
},
"var_samp" => Scalar {
params!(Float32) => Operation::nullary(|_ecx| catalog_name_only!("var_samp")) => Float64, 2644;
params!(Float64) => Operation::nullary(|_ecx| catalog_name_only!("var_samp")) => Float64, 2645;
params!(Int16) => Operation::nullary(|_ecx| catalog_name_only!("var_samp")) => Numeric, 2643;
params!(Int32) => Operation::nullary(|_ecx| catalog_name_only!("var_samp")) => Numeric, 2642;
params!(Int64) => Operation::nullary(|_ecx| catalog_name_only!("var_samp")) => Numeric, 2641;
},
"version" => Scalar {
params!() => Operation::nullary(|ecx| {
let build_info = ecx.catalog().config().build_info;
let version = format!(
"PostgreSQL 9.6 on {} (materialized {})",
build_info.target_triple, build_info.version,
);
Ok(HirScalarExpr::literal(Datum::String(&version), ScalarType::String))
}), 89;
},
// Aggregates.
"array_agg" => Aggregate {
params!(NonVecAny) => Operation::unary_ordered(|ecx, e, order_by| {
            if let ScalarType::Char { .. } = ecx.scalar_type(&e) {
                bail_unsupported!("array_agg on char");
            };
            // ArrayConcat expects all inputs to be arrays, so wrap each input
            // datum in an array.
            let e_arr = HirScalarExpr::CallVariadic {
                func: VariadicFunc::ArrayCreate { elem_type: ecx.scalar_type(&e) },
                exprs: vec![e],
            };
Ok((e_arr, AggregateFunc::ArrayConcat { order_by }))
}) => ArrayAny, 2335;
params!(ArrayAny) => Operation::unary(|_ecx, _e| bail_unsupported!("array_agg on arrays")) => ArrayAny, 4053;
},
"bool_and" => Aggregate {
params!(Any) => Operation::unary(|_ecx, _e| bail_unsupported!("bool_and")) => Bool, 2517;
},
"bool_or" => Aggregate {
params!(Any) => Operation::unary(|_ecx, _e| bail_unsupported!("bool_or")) => Bool, 2518;
},
"count" => Aggregate {
params!() => Operation::nullary(|_ecx| {
// COUNT(*) is equivalent to COUNT(true).
Ok((HirScalarExpr::literal_true(), AggregateFunc::Count))
}), 2803;
        params!(Any) => AggregateFunc::Count => Int64, 2147;
},
"max" => Aggregate {
params!(Bool) => AggregateFunc::MaxBool, oid::FUNC_MAX_BOOL_OID;
params!(Int16) => AggregateFunc::MaxInt16, 2117;
params!(Int32) => AggregateFunc::MaxInt32, 2116;
params!(Int64) => AggregateFunc::MaxInt64, 2115;
params!(Float32) => AggregateFunc::MaxFloat32, 2119;
params!(Float64) => AggregateFunc::MaxFloat64, 2120;
params!(String) => AggregateFunc::MaxString, 2129;
// TODO(#7572): make this its own function
params!(Char) => AggregateFunc::MaxString, 2244;
params!(Date) => AggregateFunc::MaxDate, 2122;
params!(Timestamp) => AggregateFunc::MaxTimestamp, 2126;
params!(TimestampTz) => AggregateFunc::MaxTimestampTz, 2127;
params!(Numeric) => AggregateFunc::MaxNumeric, oid::FUNC_MAX_NUMERIC_OID;
},
"min" => Aggregate {
params!(Bool) => AggregateFunc::MinBool, oid::FUNC_MIN_BOOL_OID;
        params!(Int16) => AggregateFunc::MinInt16, 2133;
params!(Int32) => AggregateFunc::MinInt32, 2132;
params!(Int64) => AggregateFunc::MinInt64, 2131;
params!(Float32) => AggregateFunc::MinFloat32, 2135;
params!(Float64) => AggregateFunc::MinFloat64, 2136;
params!(String) => AggregateFunc::MinString, 2145;
// TODO(#7572): make this its own function
params!(Char) => AggregateFunc::MinString, 2245;
params!(Date) => AggregateFunc::MinDate, 2138;
params!(Timestamp) => AggregateFunc::MinTimestamp, 2142;
params!(TimestampTz) => AggregateFunc::MinTimestampTz, 2143;
params!(Numeric) => AggregateFunc::MinNumeric, oid::FUNC_MIN_NUMERIC_OID;
},
"json_agg" => Aggregate {
params!(Any) => Operation::unary(|_ecx, _e| bail_unsupported!("json_agg")) => Jsonb, 3175;
},
"jsonb_agg" => Aggregate {
params!(Any) => Operation::unary_ordered(|ecx, e, order_by| {
// TODO(#7572): remove this
let e = match ecx.scalar_type(&e) {
ScalarType::Char { length } => e.call_unary(UnaryFunc::PadChar(func::PadChar { length })),
_ => e,
};
// `AggregateFunc::JsonbAgg` filters out `Datum::Null` (it
// needs to have *some* identity input), but the semantics
// of the SQL function require that `Datum::Null` is treated
            // as `Datum::JsonNull`. This call to `coalesce` converts
// between the two semantics.
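            // For example (illustrative, not from the source): aggregating
            // the values {1, NULL} yields the jsonb array [1, null] rather
            // than [1].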
let json_null = HirScalarExpr::literal(Datum::JsonNull, ScalarType::Jsonb);
let e = HirScalarExpr::CallVariadic {
func: VariadicFunc::Coalesce,
exprs: vec![typeconv::to_jsonb(ecx, e), json_null],
};
Ok((e, AggregateFunc::JsonbAgg { order_by }))
}) => Jsonb, 3267;
},
"jsonb_object_agg" => Aggregate {
params!(Any, Any) => Operation::binary_ordered(|ecx, key, val, order_by| {
// TODO(#7572): remove this
let key = match ecx.scalar_type(&key) {
ScalarType::Char { length } => key.call_unary(UnaryFunc::PadChar(func::PadChar { length })),
_ => key,
};
let val = match ecx.scalar_type(&val) {
ScalarType::Char { length } => val.call_unary(UnaryFunc::PadChar(func::PadChar { length })),
_ => val,
};
let key = typeconv::to_string(ecx, key);
let val = typeconv::to_jsonb(ecx, val);
let e = HirScalarExpr::CallVariadic {
func: VariadicFunc::RecordCreate {
field_names: vec![ColumnName::from("key"), ColumnName::from("val")],
},
exprs: vec![key, val],
};
Ok((e, AggregateFunc::JsonbObjectAgg { order_by }))
}) => Jsonb, 3270;
},
"string_agg" => Aggregate {
params!(String, String) => Operation::binary_ordered(|_ecx, value, sep, order_by| {
let e = HirScalarExpr::CallVariadic {
func: VariadicFunc::RecordCreate {
field_names: vec![ColumnName::from("value"), ColumnName::from("sep")],
},
exprs: vec![value, sep],
};
Ok((e, AggregateFunc::StringAgg { order_by }))
}), 3538;
params!(Bytes, Bytes) => Operation::binary(|_ecx, _l, _r| bail_unsupported!("string_agg")) => Bytes, 3545;
},
"sum" => Aggregate {
params!(Int16) => AggregateFunc::SumInt16, 2109;
params!(Int32) => AggregateFunc::SumInt32, 2108;
params!(Int64) => AggregateFunc::SumInt64, 2107;
params!(Float32) => AggregateFunc::SumFloat32, 2110;
params!(Float64) => AggregateFunc::SumFloat64, 2111;
params!(Numeric) => AggregateFunc::SumNumeric, 2114;
params!(Interval) => Operation::unary(|_ecx, _e| {
// Explicitly providing this unsupported overload
// prevents `sum(NULL)` from choosing the `Float64`
// implementation, so that we match PostgreSQL's behavior.
// Plus we will one day want to support this overload.
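                // For example (illustrative): `sum(col)` over an interval
                // column fails with "unsupported: sum(interval)" instead of
                // being mis-planned through the Float64 overload.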
bail_unsupported!("sum(interval)");
}) => Interval, 2113;
},
// Scalar window functions.
"row_number" => ScalarWindow {
params!() => ScalarWindowFunc::RowNumber, 3100;
},
// Table functions.
"generate_series" => Table {
params!(Int32, Int32, Int32) => Operation::variadic(move |_ecx, exprs| {
Ok(TableFuncPlan {
expr: HirRelationExpr::CallTable {
func: TableFunc::GenerateSeriesInt32,
exprs,
},
column_names: vec!["generate_series".into()],
})
}), 1066;
params!(Int32, Int32) => Operation::binary(move |_ecx, start, stop| {
Ok(TableFuncPlan {
expr: HirRelationExpr::CallTable {
func: TableFunc::GenerateSeriesInt32,
exprs: vec![start, stop, HirScalarExpr::literal(Datum::Int32(1), ScalarType::Int32)],
},
column_names: vec!["generate_series".into()],
})
}), 1067;
params!(Int64, Int64, Int64) => Operation::variadic(move |_ecx, exprs| {
Ok(TableFuncPlan {
expr: HirRelationExpr::CallTable {
func: TableFunc::GenerateSeriesInt64,
exprs,
},
column_names: vec!["generate_series".into()],
})
}), 1068;
params!(Int64, Int64) => Operation::binary(move |_ecx, start, stop| {
let row = Row::pack(&[Datum::Int64(1)]);
let column_type = ColumnType { scalar_type: ScalarType::Int64, nullable: false };
Ok(TableFuncPlan {
expr: HirRelationExpr::CallTable {
func: TableFunc::GenerateSeriesInt64,
exprs: vec![start, stop, HirScalarExpr::Literal(row, column_type)],
},
column_names: vec!["generate_series".into()],
})
}), 1069;
params!(Timestamp, Timestamp, Interval) => Operation::variadic(move |_ecx, exprs| {
Ok(TableFuncPlan {
expr: HirRelationExpr::CallTable {
func: TableFunc::GenerateSeriesTimestamp,
exprs,
},
column_names: vec!["generate_series".into()],
})
}), 938;
params!(TimestampTz, TimestampTz, Interval) => Operation::variadic(move |_ecx, exprs| {
Ok(TableFuncPlan {
expr: HirRelationExpr::CallTable {
func: TableFunc::GenerateSeriesTimestampTz,
exprs,
},
column_names: vec!["generate_series".into()],
})
}), 939;
},
"generate_subscripts" => Table {
params!(ArrayAny, Int32) => Operation::variadic(move |_ecx, exprs| {
Ok(TableFuncPlan {
expr: HirRelationExpr::CallTable {
func: TableFunc::GenerateSubscriptsArray,
exprs,
},
column_names: vec!["generate_subscripts".into()],
})
}) => ReturnType::set_of(Int32.into()), 1192;
},
"jsonb_array_elements" => Table {
params!(Jsonb) => Operation::unary(move |_ecx, jsonb| {
Ok(TableFuncPlan {
expr: HirRelationExpr::CallTable {
func: TableFunc::JsonbArrayElements { stringify: false },
exprs: vec![jsonb],
},
column_names: vec!["value".into()],
})
}), 3219;
},
"jsonb_array_elements_text" => Table {
params!(Jsonb) => Operation::unary(move |_ecx, jsonb| {
Ok(TableFuncPlan {
expr: HirRelationExpr::CallTable {
func: TableFunc::JsonbArrayElements { stringify: true },
exprs: vec![jsonb],
},
column_names: vec!["value".into()],
})
}), 3465;
},
"jsonb_each" => Table {
params!(Jsonb) => Operation::unary(move |_ecx, jsonb| {
Ok(TableFuncPlan {
expr: HirRelationExpr::CallTable {
func: TableFunc::JsonbEach { stringify: false },
exprs: vec![jsonb],
},
column_names: vec!["key".into(), "value".into()],
})
}), 3208;
},
"jsonb_each_text" => Table {
params!(Jsonb) => Operation::unary(move |_ecx, jsonb| {
Ok(TableFuncPlan {
expr: HirRelationExpr::CallTable {
func: TableFunc::JsonbEach { stringify: true },
exprs: vec![jsonb],
},
column_names: vec!["key".into(), "value".into()],
})
}), 3932;
},
"jsonb_object_keys" => Table {
params!(Jsonb) => Operation::unary(move |_ecx, jsonb| {
Ok(TableFuncPlan {
expr: HirRelationExpr::CallTable {
func: TableFunc::JsonbObjectKeys,
exprs: vec![jsonb],
},
column_names: vec!["jsonb_object_keys".into()],
})
}), 3931;
},
// Note that these implementations' input to `generate_series` is
// contrived to match Flink's expected values. There are other,
// equally valid windows we could generate.
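    //
    // Illustrative call (assumed output, not from this source):
    //   date_bin_hopping('1 minute', '5 minutes', TIMESTAMP '2021-01-01 12:04:30')
    // yields window starts 12:00 through 12:04 -- every hop whose
    // [start, start + width) interval contains the input timestamp.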
"date_bin_hopping" => Table {
// (hop, width, timestamp)
params!(Interval, Interval, Timestamp) => experimental_sql_impl_table_func("date_bin_hopping", "
SELECT *
FROM pg_catalog.generate_series(
pg_catalog.date_bin($1, $3 + $1, '1970-01-01') - $2, $3, $1
) AS dbh(date_bin_hopping)
") => ReturnType::set_of(Timestamp.into()), oid::FUNC_MZ_DATE_BIN_HOPPING_UNIX_EPOCH_TS_OID;
// (hop, width, timestamp)
params!(Interval, Interval, TimestampTz) => experimental_sql_impl_table_func("date_bin_hopping", "
SELECT *
FROM pg_catalog.generate_series(
pg_catalog.date_bin($1, $3 + $1, '1970-01-01') - $2, $3, $1
) AS dbh(date_bin_hopping)
") => ReturnType::set_of(TimestampTz.into()), oid::FUNC_MZ_DATE_BIN_HOPPING_UNIX_EPOCH_TSTZ_OID;
// (hop, width, timestamp, origin)
params!(Interval, Interval, Timestamp, Timestamp) => experimental_sql_impl_table_func("date_bin_hopping", "
SELECT *
FROM pg_catalog.generate_series(
pg_catalog.date_bin($1, $3 + $1, $4) - $2, $3, $1
) AS dbh(date_bin_hopping)
") => ReturnType::set_of(Timestamp.into()), oid::FUNC_MZ_DATE_BIN_HOPPING_TS_OID;
// (hop, width, timestamp, origin)
params!(Interval, Interval, TimestampTz, TimestampTz) => experimental_sql_impl_table_func("date_bin_hopping", "
SELECT *
FROM pg_catalog.generate_series(
pg_catalog.date_bin($1, $3 + $1, $4) - $2, $3, $1
) AS dbh(date_bin_hopping)
") => ReturnType::set_of(TimestampTz.into()), oid::FUNC_MZ_DATE_BIN_HOPPING_TSTZ_OID;
},
"encode" => Scalar {
params!(Bytes, String) => BinaryFunc::Encode, 1946;
},
"decode" => Scalar {
params!(String, String) => BinaryFunc::Decode, 1947;
}
}
};
pub static ref INFORMATION_SCHEMA_BUILTINS: HashMap<&'static str, Func> = {
use ParamType::*;
builtins! {
"_pg_expandarray" => Table {
// See: https://github.com/postgres/postgres/blob/16e3ad5d143795b05a21dc887c2ab384cce4bcb8/src/backend/catalog/information_schema.sql#L43
params!(ArrayAny) => sql_impl_table_func("
SELECT
$1[s] AS x,
s - pg_catalog.array_lower($1, 1) + 1 AS n
FROM pg_catalog.generate_series(
pg_catalog.array_lower($1, 1),
pg_catalog.array_upper($1, 1),
1) as g(s)
") => ReturnType::set_of(RecordAny), 13395;
}
}
};
pub static ref MZ_CATALOG_BUILTINS: HashMap<&'static str, Func> = {
use ScalarType::*;
use ParamType::*;
builtins! {
"csv_extract" => Table {
params!(Int64, String) => Operation::binary(move |_ecx, ncols, input| {
let ncols = match ncols.into_literal_int64() {
None | Some(i64::MIN..=0) => {
sql_bail!("csv_extract number of columns must be a positive integer literal");
},
Some(ncols) => ncols,
};
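            // For example (illustrative): `csv_extract(2, input)` produces
            // columns `column1` and `column2`; a non-literal or non-positive
            // column count is rejected at planning time by the check above.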
let ncols = usize::try_from(ncols).expect("known to be greater than zero");
Ok(TableFuncPlan {
expr: HirRelationExpr::CallTable {
func: TableFunc::CsvExtract(ncols),
exprs: vec![input],
},
column_names: (1..=ncols).map(|i| format!("column{}", i).into()).collect(),
})
}) => ReturnType::set_of(RecordAny), oid::FUNC_CSV_EXTRACT_OID;
},
"concat_agg" => Aggregate {
params!(Any) => Operation::unary(|_ecx, _e| bail_unsupported!("concat_agg")) => String, oid::FUNC_CONCAT_AGG_OID;
},
"current_timestamp" => Scalar {
params!() => Operation::nullary(|ecx| plan_current_timestamp(ecx, "current_timestamp")), oid::FUNC_CURRENT_TIMESTAMP_OID;
},
"list_agg" => Aggregate {
params!(Any) => Operation::unary_ordered(|ecx, e, order_by| {
            if let ScalarType::Char { .. } = ecx.scalar_type(&e) {
                bail_unsupported!("list_agg on char");
            };
            // ListConcat expects all inputs to be lists, so wrap each input
            // datum in a list.
            let e_arr = HirScalarExpr::CallVariadic {
                func: VariadicFunc::ListCreate { elem_type: ecx.scalar_type(&e) },
                exprs: vec![e],
            };
Ok((e_arr, AggregateFunc::ListConcat { order_by }))
}) => ListAny, oid::FUNC_LIST_AGG_OID;
},
"list_append" => Scalar {
vec![ListAny, ListElementAny] => BinaryFunc::ListElementConcat => ListAny, oid::FUNC_LIST_APPEND_OID;
},
"list_cat" => Scalar {
vec![ListAny, ListAny] => BinaryFunc::ListListConcat => ListAny, oid::FUNC_LIST_CAT_OID;
},
"list_ndims" => Scalar {
vec![ListAny] => Operation::unary(|ecx, e| {
ecx.require_experimental_mode("list_ndims")?;
let d = ecx.scalar_type(&e).unwrap_list_n_dims();
Ok(HirScalarExpr::literal(Datum::Int32(d as i32), ScalarType::Int32))
}) => Int32, oid::FUNC_LIST_NDIMS_OID;
},
"list_length" => Scalar {
vec![ListAny] => UnaryFunc::ListLength => Int32, oid::FUNC_LIST_LENGTH_OID;
},
"list_length_max" => Scalar {
vec![ListAny, Plain(Int64)] => Operation::binary(|ecx, lhs, rhs| {
ecx.require_experimental_mode("list_length_max")?;
let max_dim = ecx.scalar_type(&lhs).unwrap_list_n_dims();
Ok(lhs.call_binary(rhs, BinaryFunc::ListLengthMax{ max_dim }))
}) => Int32, oid::FUNC_LIST_LENGTH_MAX_OID;
},
"list_prepend" => Scalar {
vec![ListElementAny, ListAny] => BinaryFunc::ElementListConcat => ListAny, oid::FUNC_LIST_PREPEND_OID;
},
"list_remove" => Scalar {
vec![ListAny, ListElementAny] => Operation::binary(|ecx, lhs, rhs| {
ecx.require_experimental_mode("list_remove")?;
Ok(lhs.call_binary(rhs, BinaryFunc::ListRemove))
}) => ListAny, oid::FUNC_LIST_REMOVE_OID;
},
"mz_cluster_id" => Scalar {
params!() => Operation::nullary(mz_cluster_id), oid::FUNC_MZ_CLUSTER_ID_OID;
},
"mz_logical_timestamp" => Scalar {
params!() => NullaryFunc::MzLogicalTimestamp, oid::FUNC_MZ_LOGICAL_TIMESTAMP_OID;
},
"mz_uptime" => Scalar {
params!() => Operation::nullary(mz_uptime), oid::FUNC_MZ_UPTIME_OID;
},
"mz_version" => Scalar {
params!() => Operation::nullary(|ecx| {
let version = ecx.catalog().config().build_info.human_version();
Ok(HirScalarExpr::literal(Datum::String(&version), ScalarType::String))
}), oid::FUNC_MZ_VERSION_OID;
},
"regexp_extract" => Table {
params!(String, String) => Operation::binary(move |_ecx, regex, haystack| {
let regex = match regex.into_literal_string() {
None => sql_bail!("regex_extract requires a string literal as its first argument"),
                Some(regex) => expr::AnalyzedRegex::new(&regex).map_err(|e| PlanError::Unstructured(format!("analyzing regex: {}", e)))?,
};
let column_names = regex
.capture_groups_iter()
.map(|cg| {
cg.name.clone().unwrap_or_else(|| format!("column{}", cg.index)).into()
})
.collect();
Ok(TableFuncPlan {
expr: HirRelationExpr::CallTable {
func: TableFunc::RegexpExtract(regex),
exprs: vec![haystack],
},
column_names,
})
}) => ReturnType::set_of(RecordAny), oid::FUNC_REGEXP_EXTRACT_OID;
},
"repeat_row" => Table {
params!(Int64) => Operation::unary(move |ecx, n| {
ecx.require_experimental_mode("repeat_row")?;
Ok(TableFuncPlan {
expr: HirRelationExpr::CallTable {
func: TableFunc::Repeat,
exprs: vec![n],
},
column_names: vec![]
})
}), oid::FUNC_REPEAT_OID;
},
"unnest" => Table {
vec![ArrayAny] => Operation::unary(move |ecx, e| {
let el_typ = ecx.scalar_type(&e).unwrap_array_element_type().clone();
Ok(TableFuncPlan {
expr: HirRelationExpr::CallTable {
func: TableFunc::UnnestArray { el_typ },
exprs: vec![e],
},
column_names: vec!["unnest".into()],
})
}) => ReturnType::set_of(ListElementAny), 2331;
vec![ListAny] => Operation::unary(move |ecx, e| {
let el_typ = ecx.scalar_type(&e).unwrap_list_element_type().clone();
Ok(TableFuncPlan {
expr: HirRelationExpr::CallTable {
func: TableFunc::UnnestList { el_typ },
exprs: vec![e],
},
column_names: vec!["unnest".into()],
})
}) => ReturnType::set_of(ListElementAny), oid::FUNC_UNNEST_LIST_OID;
}
}
};
pub static ref MZ_INTERNAL_BUILTINS: HashMap<&'static str, Func> = {
use ParamType::*;
use ScalarType::*;
builtins! {
"mz_all" => Aggregate {
params!(Any) => AggregateFunc::All => Bool, oid::FUNC_MZ_ALL_OID;
},
"mz_any" => Aggregate {
params!(Any) => AggregateFunc::Any => Bool, oid::FUNC_MZ_ANY_OID;
},
"mz_avg_promotion" => Scalar {
// Promotes a numeric type to the smallest fractional type that
// can represent it. This is primarily useful for the avg
// aggregate function, so that the avg of an integer column does
// not get truncated to an integer, which would be surprising to
// users (#549).
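            // For example (illustrative): with this promotion, the avg of an
            // Int32 column containing {1, 2} is 1.5 (numeric) rather than 1.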
params!(Float32) => Operation::identity(), oid::FUNC_MZ_AVG_PROMOTION_F32_OID;
params!(Float64) => Operation::identity(), oid::FUNC_MZ_AVG_PROMOTION_F64_OID;
params!(Int16) => Operation::unary(|ecx, e| {
typeconv::plan_cast(
ecx, CastContext::Explicit, e, &ScalarType::Numeric {scale: None},
)
}), oid::FUNC_MZ_AVG_PROMOTION_I16_OID;
params!(Int32) => Operation::unary(|ecx, e| {
typeconv::plan_cast(
ecx, CastContext::Explicit, e, &ScalarType::Numeric {scale: None},
)
}), oid::FUNC_MZ_AVG_PROMOTION_I32_OID;
},
"mz_classify_object_id" => Scalar {
params!(String) => sql_impl_func(
"CASE
WHEN $1 LIKE 'u%' THEN 'user'
WHEN $1 LIKE 's%' THEN 'system'
WHEN $1 like 't%' THEN 'temp'
END"
) => String, oid::FUNC_MZ_CLASSIFY_OBJECT_ID_OID;
},
"mz_error_if_null" => Scalar {
// If the first argument is NULL, returns an EvalError::Internal whose error
// message is the second argument.
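            // For example (illustrative): `mz_error_if_null(NULL, 'oops')`
            // raises the error "oops", while `mz_error_if_null(1, 'oops')`
            // returns 1.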
params!(Any, String) => VariadicFunc::ErrorIfNull => Any, oid::FUNC_MZ_ERROR_IF_NULL_OID;
},
"mz_is_materialized" => Scalar {
params!(String) => sql_impl_func("EXISTS (SELECT 1 FROM mz_indexes WHERE on_id = $1 AND enabled)") => Bool,
oid::FUNC_MZ_IS_MATERIALIZED_OID;
},
"mz_render_typemod" => Scalar {
params!(Oid, Int32) => BinaryFunc::MzRenderTypemod, oid::FUNC_MZ_RENDER_TYPEMOD_OID;
},
// This ought to be exposed in `mz_catalog`, but its name is rather
// confusing. It does not identify the SQL session, but the
// invocation of this `materialized` process.
"mz_session_id" => Scalar {
params!() => Operation::nullary(mz_session_id), oid::FUNC_MZ_SESSION_ID_OID;
},
"mz_sleep" => Scalar {
params!(Float64) => UnaryFunc::Sleep(func::Sleep), oid::FUNC_MZ_SLEEP_OID;
}
}
};
}
fn plan_current_timestamp(ecx: &ExprContext, name: &str) -> Result<HirScalarExpr, PlanError> {
match ecx.qcx.lifetime {
QueryLifetime::OneShot(pcx) => Ok(HirScalarExpr::literal(
Datum::from(pcx.wall_time),
ScalarType::TimestampTz,
)),
QueryLifetime::Static => sql_bail!("{} cannot be used in static queries; see: https://materialize.com/docs/sql/functions/now_and_mz_logical_timestamp/", name),
}
}
fn digest(algorithm: &'static str) -> Operation<HirScalarExpr> {
Operation::unary(move |_ecx, input| {
let algorithm = HirScalarExpr::literal(Datum::String(algorithm), ScalarType::String);
Ok(input.call_binary(algorithm, BinaryFunc::DigestBytes))
})
}
fn mz_cluster_id(ecx: &ExprContext) -> Result<HirScalarExpr, PlanError> {
Ok(HirScalarExpr::literal(
Datum::from(ecx.catalog().config().cluster_id),
ScalarType::Uuid,
))
}
fn mz_session_id(ecx: &ExprContext) -> Result<HirScalarExpr, PlanError> {
Ok(HirScalarExpr::literal(
Datum::from(ecx.catalog().config().session_id),
ScalarType::Uuid,
))
}
fn mz_uptime(ecx: &ExprContext) -> Result<HirScalarExpr, PlanError> {
let uptime = ecx.catalog().config().start_instant.elapsed();
let uptime = chrono::Duration::from_std(uptime)
.map_err(|e| PlanError::Unstructured(format!("converting uptime to duration: {}", e)))?;
match ecx.qcx.lifetime {
QueryLifetime::OneShot(_) => Ok(HirScalarExpr::literal(
Datum::from(uptime),
ScalarType::Interval,
)),
QueryLifetime::Static => sql_bail!("mz_uptime cannot be used in static queries"),
}
}
fn pg_postmaster_start_time(ecx: &ExprContext) -> Result<HirScalarExpr, PlanError> {
Ok(HirScalarExpr::literal(
Datum::from(ecx.catalog().config().start_time),
ScalarType::TimestampTz,
))
}
fn array_to_string(
ecx: &ExprContext,
exprs: Vec<HirScalarExpr>,
) -> Result<HirScalarExpr, PlanError> {
let elem_type = match ecx.scalar_type(&exprs[0]) {
ScalarType::Array(elem_type) => *elem_type,
_ => unreachable!("array_to_string is guaranteed to receive array as first argument"),
};
Ok(HirScalarExpr::CallVariadic {
func: VariadicFunc::ArrayToString { elem_type },
exprs,
})
}
lazy_static! {
/// Correlates an operator with all of its implementations.
static ref OP_IMPLS: HashMap<&'static str, Func> = {
use ScalarBaseType::*;
use BinaryFunc::*;
use ParamType::*;
builtins! {
// Literal OIDs collected from PG 13 using a version of this query
// ```sql
// SELECT
// oid,
// oprname,
// oprleft::regtype,
// oprright::regtype
// FROM
// pg_operator
// WHERE
// oprname IN (
// '+', '-', '*', '/', '%',
// '|', '&', '#', '~', '<<', '>>',
// '~~', '!~~'
// )
// ORDER BY
// oprname;
// ```
// Values are also available through
// https://github.com/postgres/postgres/blob/master/src/include/catalog/pg_operator.dat
// ARITHMETIC
"+" => Scalar {
params!(Any) => Operation::new(|ecx, exprs, _params, _order_by| {
// Unary plus has unusual compatibility requirements.
//
// In PostgreSQL, it is only defined for numeric types, so
// `+$1` and `+'1'` get coerced to `Float64` per the usual
// rules, but `+'1'::text` is rejected.
//
// In SQLite, unary plus can be applied to *any* type, and
// is always the identity function.
//
                // To try to be compatible with both PostgreSQL and SQLite,
// we accept explicitly-typed arguments of any type, but try
// to coerce unknown-type arguments as `Float64`.
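                //
                // Illustrative outcomes under this rule (assumed):
                // `+'1'` coerces the unknown-typed literal to `Float64`,
                // while `+'1'::text` is accepted unchanged (identity),
                // unlike in PostgreSQL.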
typeconv::plan_coerce(ecx, exprs.into_element(), &ScalarType::Float64)
}) => Any, oid::OP_UNARY_PLUS_OID;
params!(Int16, Int16) => AddInt16, 550;
params!(Int32, Int32) => AddInt32, 551;
params!(Int64, Int64) => AddInt64, 684;
params!(Float32, Float32) => AddFloat32, 586;
params!(Float64, Float64) => AddFloat64, 591;
params!(Interval, Interval) => AddInterval, 1337;
params!(Timestamp, Interval) => AddTimestampInterval, 2066;
params!(Interval, Timestamp) => {
Operation::binary(|_ecx, lhs, rhs| Ok(rhs.call_binary(lhs, AddTimestampInterval)))
            }, 2553;
params!(TimestampTz, Interval) => AddTimestampTzInterval, 1327;
params!(Interval, TimestampTz) => {
Operation::binary(|_ecx, lhs, rhs| Ok(rhs.call_binary(lhs, AddTimestampTzInterval)))
}, 2554;
params!(Date, Interval) => AddDateInterval, 1076;
params!(Interval, Date) => {
Operation::binary(|_ecx, lhs, rhs| Ok(rhs.call_binary(lhs, AddDateInterval)))
}, 2551;
params!(Date, Time) => AddDateTime, 1360;
params!(Time, Date) => {
Operation::binary(|_ecx, lhs, rhs| Ok(rhs.call_binary(lhs, AddDateTime)))
}, 1363;
params!(Time, Interval) => AddTimeInterval, 1800;
params!(Interval, Time) => {
Operation::binary(|_ecx, lhs, rhs| Ok(rhs.call_binary(lhs, AddTimeInterval)))
}, 1849;
params!(Numeric, Numeric) => AddNumeric, 1758;
},
"-" => Scalar {
params!(Int16) => UnaryFunc::NegInt16(func::NegInt16), 559;
params!(Int32) => UnaryFunc::NegInt32(func::NegInt32), 558;
params!(Int64) => UnaryFunc::NegInt64(func::NegInt64), 484;
params!(Float32) => UnaryFunc::NegFloat32(func::NegFloat32), 584;
params!(Float64) => UnaryFunc::NegFloat64(func::NegFloat64), 585;
params!(Numeric) => UnaryFunc::NegNumeric(func::NegNumeric), 17510;
params!(Interval) => UnaryFunc::NegInterval(func::NegInterval), 1336;
params!(Int32, Int32) => SubInt32, 555;
params!(Int64, Int64) => SubInt64, 685;
params!(Float32, Float32) => SubFloat32, 587;
params!(Float64, Float64) => SubFloat64, 592;
params!(Numeric, Numeric) => SubNumeric, 17590;
params!(Interval, Interval) => SubInterval, 1338;
params!(Timestamp, Timestamp) => SubTimestamp, 2067;
params!(TimestampTz, TimestampTz) => SubTimestampTz, 1328;
params!(Timestamp, Interval) => SubTimestampInterval, 2068;
params!(TimestampTz, Interval) => SubTimestampTzInterval, 1329;
params!(Date, Date) => SubDate, 1099;
params!(Date, Interval) => SubDateInterval, 1077;
params!(Time, Time) => SubTime, 1399;
params!(Time, Interval) => SubTimeInterval, 1801;
params!(Jsonb, Int64) => JsonbDeleteInt64, 3286;
params!(Jsonb, String) => JsonbDeleteString, 3285;
// TODO(jamii) there should be corresponding overloads for
// Array(Int64) and Array(String)
},
"*" => Scalar {
params!(Int16, Int16) => MulInt16, 526;
params!(Int32, Int32) => MulInt32, 514;
params!(Int64, Int64) => MulInt64, 686;
params!(Float32, Float32) => MulFloat32, 589;
params!(Float64, Float64) => MulFloat64, 594;
params!(Interval, Float64) => MulInterval, 1583;
params!(Float64, Interval) => {
Operation::binary(|_ecx, lhs, rhs| Ok(rhs.call_binary(lhs, MulInterval)))
}, 1584;
params!(Numeric, Numeric) => MulNumeric, 1760;
},
"/" => Scalar {
params!(Int16, Int16) => DivInt16, 527;
params!(Int32, Int32) => DivInt32, 528;
params!(Int64, Int64) => DivInt64, 687;
params!(Float32, Float32) => DivFloat32, 588;
params!(Float64, Float64) => DivFloat64, 593;
params!(Interval, Float64) => DivInterval, 1585;
params!(Numeric, Numeric) => DivNumeric, 1761;
},
"%" => Scalar {
params!(Int16, Int16) => ModInt16, 529;
params!(Int32, Int32) => ModInt32, 530;
params!(Int64, Int64) => ModInt64, 439;
params!(Float32, Float32) => ModFloat32, oid::OP_MOD_F32_OID;
params!(Float64, Float64) => ModFloat64, oid::OP_MOD_F64_OID;
params!(Numeric, Numeric) => ModNumeric, 1762;
},
"&" => Scalar {
params!(Int16, Int16) => BitAndInt16, 1874;
params!(Int32, Int32) => BitAndInt32, 1880;
params!(Int64, Int64) => BitAndInt64, 1886;
},
"|" => Scalar {
params!(Int16, Int16) => BitOrInt16, 1875;
params!(Int32, Int32) => BitOrInt32, 1881;
params!(Int64, Int64) => BitOrInt64, 1887;
},
"#" => Scalar {
params!(Int16, Int16) => BitXorInt16, 1876;
params!(Int32, Int32) => BitXorInt32, 1882;
params!(Int64, Int64) => BitXorInt64, 1888;
},
"<<" => Scalar {
params!(Int16, Int32) => BitShiftLeftInt16, 1878;
params!(Int32, Int32) => BitShiftLeftInt32, 1884;
params!(Int64, Int32) => BitShiftLeftInt64, 1890;
},
">>" => Scalar {
params!(Int16, Int32) => BitShiftRightInt16, 1879;
params!(Int32, Int32) => BitShiftRightInt32, 1885;
params!(Int64, Int32) => BitShiftRightInt64, 1891;
},
// ILIKE
"~~*" => Scalar {
params!(String, String) => IsLikePatternMatch { case_insensitive: true }, 1627;
params!(Char, String) => Operation::binary(|ecx, lhs, rhs| {
let length = ecx.scalar_type(&lhs).unwrap_char_varchar_length();
Ok(lhs.call_unary(UnaryFunc::PadChar(func::PadChar { length }))
.call_binary(rhs, IsLikePatternMatch { case_insensitive: true })
)
}), 1629;
},
"!~~*" => Scalar {
params!(String, String) => Operation::binary(|_ecx, lhs, rhs| {
Ok(lhs
.call_binary(rhs, IsLikePatternMatch { case_insensitive: true })
.call_unary(UnaryFunc::Not(func::Not)))
}) => Bool, 1628;
params!(Char, String) => Operation::binary(|ecx, lhs, rhs| {
let length = ecx.scalar_type(&lhs).unwrap_char_varchar_length();
Ok(lhs.call_unary(UnaryFunc::PadChar(func::PadChar { length }))
                    .call_binary(rhs, IsLikePatternMatch { case_insensitive: true })
.call_unary(UnaryFunc::Not(func::Not))
)
}) => Bool, 1630;
},
// LIKE
"~~" => Scalar {
params!(String, String) => IsLikePatternMatch { case_insensitive: false }, 1209;
params!(Char, String) => Operation::binary(|ecx, lhs, rhs| {
let length = ecx.scalar_type(&lhs).unwrap_char_varchar_length();
Ok(lhs.call_unary(UnaryFunc::PadChar(func::PadChar { length }))
.call_binary(rhs, IsLikePatternMatch { case_insensitive: false })
)
}), 1211;
},
"!~~" => Scalar {
params!(String, String) => Operation::binary(|_ecx, lhs, rhs| {
Ok(lhs
.call_binary(rhs, IsLikePatternMatch { case_insensitive: false })
.call_unary(UnaryFunc::Not(func::Not)))
}) => Bool, 1210;
params!(Char, String) => Operation::binary(|ecx, lhs, rhs| {
let length = ecx.scalar_type(&lhs).unwrap_char_varchar_length();
Ok(lhs.call_unary(UnaryFunc::PadChar(func::PadChar { length }))
.call_binary(rhs, IsLikePatternMatch { case_insensitive: false })
.call_unary(UnaryFunc::Not(func::Not))
)
}) => Bool, 1212;
},
// REGEX
"~" => Scalar {
params!(Int16) => UnaryFunc::BitNotInt16(func::BitNotInt16), 1877;
params!(Int32) => UnaryFunc::BitNotInt32(func::BitNotInt32), 1883;
params!(Int64) => UnaryFunc::BitNotInt64(func::BitNotInt64), 1889;
params!(String, String) => IsRegexpMatch { case_insensitive: false }, 641;
params!(Char, String) => Operation::binary(|ecx, lhs, rhs| {
let length = ecx.scalar_type(&lhs).unwrap_char_varchar_length();
Ok(lhs.call_unary(UnaryFunc::PadChar(func::PadChar { length }))
.call_binary(rhs, IsRegexpMatch { case_insensitive: false })
)
}), 1055;
},
"~*" => Scalar {
params!(String, String) => Operation::binary(|_ecx, lhs, rhs| {
Ok(lhs.call_binary(rhs, IsRegexpMatch { case_insensitive: true }))
}), 1228;
params!(Char, String) => Operation::binary(|ecx, lhs, rhs| {
let length = ecx.scalar_type(&lhs).unwrap_char_varchar_length();
Ok(lhs.call_unary(UnaryFunc::PadChar(func::PadChar { length }))
.call_binary(rhs, IsRegexpMatch { case_insensitive: true })
)
}), 1234;
},
"!~" => Scalar {
params!(String, String) => Operation::binary(|_ecx, lhs, rhs| {
Ok(lhs
.call_binary(rhs, IsRegexpMatch { case_insensitive: false })
.call_unary(UnaryFunc::Not(func::Not)))
}) => Bool, 642;
params!(Char, String) => Operation::binary(|ecx, lhs, rhs| {
let length = ecx.scalar_type(&lhs).unwrap_char_varchar_length();
Ok(lhs.call_unary(UnaryFunc::PadChar(func::PadChar { length }))
                    .call_binary(rhs, IsRegexpMatch { case_insensitive: false })
.call_unary(UnaryFunc::Not(func::Not))
)
}) => Bool, 1056;
},
"!~*" => Scalar {
params!(String, String) => Operation::binary(|_ecx, lhs, rhs| {
Ok(lhs
.call_binary(rhs, IsRegexpMatch { case_insensitive: true })
.call_unary(UnaryFunc::Not(func::Not)))
}) => Bool, 1229;
params!(Char, String) => Operation::binary(|ecx, lhs, rhs| {
let length = ecx.scalar_type(&lhs).unwrap_char_varchar_length();
Ok(lhs.call_unary(UnaryFunc::PadChar(func::PadChar { length }))
.call_binary(rhs, IsRegexpMatch { case_insensitive: true })
.call_unary(UnaryFunc::Not(func::Not))
)
}) => Bool, 1235;
},
// CONCAT
"||" => Scalar {
params!(String, NonVecAny) => Operation::binary(|ecx, lhs, rhs| {
let rhs = typeconv::plan_cast(
ecx,
CastContext::Explicit,
rhs,
&ScalarType::String,
)?;
Ok(lhs.call_binary(rhs, TextConcat))
}) => String, 2779;
params!(NonVecAny, String) => Operation::binary(|ecx, lhs, rhs| {
let lhs = typeconv::plan_cast(
ecx,
CastContext::Explicit,
lhs,
&ScalarType::String,
)?;
Ok(lhs.call_binary(rhs, TextConcat))
}) => String, 2780;
params!(String, String) => TextConcat, 654;
params!(Jsonb, Jsonb) => JsonbConcat, 3284;
params!(ArrayAny, ArrayAny) => ArrayArrayConcat => ArrayAny, 375;
params!(ListAny, ListAny) => ListListConcat => ListAny, oid::OP_CONCAT_LIST_LIST_OID;
params!(ListAny, ListElementAny) => ListElementConcat => ListAny, oid::OP_CONCAT_LIST_ELEMENT_OID;
params!(ListElementAny, ListAny) => ElementListConcat => ListAny, oid::OP_CONCAT_ELEMENY_LIST_OID;
},
        // JSON and MAP
"->" => Scalar {
params!(Jsonb, Int64) => JsonbGetInt64 { stringify: false }, 3212;
params!(Jsonb, String) => JsonbGetString { stringify: false }, 3211;
params!(MapAny, String) => MapGetValue => Any, oid::OP_GET_VALUE_MAP_OID;
params!(MapAny, ScalarType::Array(Box::new(ScalarType::String))) => MapGetValues => ArrayAny, oid::OP_GET_VALUES_MAP_OID;
},
"->>" => Scalar {
params!(Jsonb, Int64) => JsonbGetInt64 { stringify: true }, 3481;
params!(Jsonb, String) => JsonbGetString { stringify: true }, 3477;
},
"#>" => Scalar {
params!(Jsonb, ScalarType::Array(Box::new(ScalarType::String))) => JsonbGetPath { stringify: false }, 3213;
},
"#>>" => Scalar {
params!(Jsonb, ScalarType::Array(Box::new(ScalarType::String))) => JsonbGetPath { stringify: true }, 3206;
},
"@>" => Scalar {
params!(Jsonb, Jsonb) => JsonbContainsJsonb, 3246;
params!(Jsonb, String) => Operation::binary(|_ecx, lhs, rhs| {
Ok(lhs.call_binary(
rhs.call_unary(UnaryFunc::CastStringToJsonb),
JsonbContainsJsonb,
))
}), oid::OP_CONTAINS_JSONB_STRING_OID;
params!(String, Jsonb) => Operation::binary(|_ecx, lhs, rhs| {
Ok(lhs.call_unary(UnaryFunc::CastStringToJsonb)
.call_binary(rhs, JsonbContainsJsonb))
}), oid::OP_CONTAINS_STRING_JSONB_OID;
params!(MapAny, MapAny) => MapContainsMap => Bool, oid::OP_CONTAINS_MAP_MAP_OID;
},
"<@" => Scalar {
params!(Jsonb, Jsonb) => Operation::binary(|_ecx, lhs, rhs| {
Ok(rhs.call_binary(
lhs,
JsonbContainsJsonb
))
            }), 3250;
params!(Jsonb, String) => Operation::binary(|_ecx, lhs, rhs| {
Ok(rhs.call_unary(UnaryFunc::CastStringToJsonb)
.call_binary(lhs, BinaryFunc::JsonbContainsJsonb))
}), oid::OP_CONTAINED_JSONB_STRING_OID;
params!(String, Jsonb) => Operation::binary(|_ecx, lhs, rhs| {
Ok(rhs.call_binary(
lhs.call_unary(UnaryFunc::CastStringToJsonb),
BinaryFunc::JsonbContainsJsonb,
))
}), oid::OP_CONTAINED_STRING_JSONB_OID;
params!(MapAny, MapAny) => Operation::binary(|_ecx, lhs, rhs| {
Ok(rhs.call_binary(lhs, MapContainsMap))
}) => Bool, oid::OP_CONTAINED_MAP_MAP_OID;
},
"?" => Scalar {
params!(Jsonb, String) => JsonbContainsString, 3247;
params!(MapAny, String) => MapContainsKey => Bool, oid::OP_CONTAINS_KEY_MAP_OID;
},
"?&" => Scalar {
params!(MapAny, ScalarType::Array(Box::new(ScalarType::String))) => MapContainsAllKeys => Bool, oid::OP_CONTAINS_ALL_KEYS_MAP_OID;
},
"?|" => Scalar {
params!(MapAny, ScalarType::Array(Box::new(ScalarType::String))) => MapContainsAnyKeys => Bool, oid::OP_CONTAINS_ANY_KEYS_MAP_OID;
},
// COMPARISON OPS
"<" => Scalar {
params!(Numeric, Numeric) => BinaryFunc::Lt, 1754;
params!(Bool, Bool) => BinaryFunc::Lt, 58;
            params!(Int16, Int16) => BinaryFunc::Lt, 95;
params!(Int32, Int32) => BinaryFunc::Lt, 97;
params!(Int64, Int64) => BinaryFunc::Lt, 412;
params!(Float32, Float32) => BinaryFunc::Lt, 622;
params!(Float64, Float64) => BinaryFunc::Lt, 672;
params!(Oid, Oid) => BinaryFunc::Lt, 609;
params!(Date, Date) => BinaryFunc::Lt, 1095;
params!(Time, Time) => BinaryFunc::Lt, 1110;
params!(Timestamp, Timestamp) => BinaryFunc::Lt, 2062;
params!(TimestampTz, TimestampTz) => BinaryFunc::Lt, 1322;
params!(Uuid, Uuid) => BinaryFunc::Lt, 2974;
params!(Interval, Interval) => BinaryFunc::Lt, 1332;
params!(Bytes, Bytes) => BinaryFunc::Lt, 1957;
params!(String, String) => BinaryFunc::Lt, 664;
params!(Char, Char) => BinaryFunc::Lt, 1058;
params!(Jsonb, Jsonb) => BinaryFunc::Lt, 3242;
params!(ArrayAny, ArrayAny) => BinaryFunc::Lt => Bool, 1072;
},
"<=" => Scalar {
params!(Numeric, Numeric) => BinaryFunc::Lte, 1755;
params!(Bool, Bool) => BinaryFunc::Lte, 1694;
params!(Int16, Int16) => BinaryFunc::Lte, 522;
params!(Int32, Int32) => BinaryFunc::Lte, 523;
params!(Int64, Int64) => BinaryFunc::Lte, 414;
params!(Float32, Float32) => BinaryFunc::Lte, 624;
params!(Float64, Float64) => BinaryFunc::Lte, 673;
params!(Oid, Oid) => BinaryFunc::Lte, 611;
params!(Date, Date) => BinaryFunc::Lte, 1096;
params!(Time, Time) => BinaryFunc::Lte, 1111;
params!(Timestamp, Timestamp) => BinaryFunc::Lte, 2063;
params!(TimestampTz, TimestampTz) => BinaryFunc::Lte, 1323;
params!(Uuid, Uuid) => BinaryFunc::Lte, 2976;
params!(Interval, Interval) => BinaryFunc::Lte, 1333;
params!(Bytes, Bytes) => BinaryFunc::Lte, 1958;
params!(String, String) => BinaryFunc::Lte, 665;
params!(Char, Char) => BinaryFunc::Lte, 1059;
params!(Jsonb, Jsonb) => BinaryFunc::Lte, 3244;
params!(ArrayAny, ArrayAny) => BinaryFunc::Lte => Bool, 1074;
},
">" => Scalar {
params!(Numeric, Numeric) => BinaryFunc::Gt, 1756;
params!(Bool, Bool) => BinaryFunc::Gt, 59;
params!(Int16, Int16) => BinaryFunc::Gt, 520;
params!(Int32, Int32) => BinaryFunc::Gt, 521;
params!(Int64, Int64) => BinaryFunc::Gt, 413;
params!(Float32, Float32) => BinaryFunc::Gt, 623;
params!(Float64, Float64) => BinaryFunc::Gt, 674;
params!(Oid, Oid) => BinaryFunc::Gt, 610;
params!(Date, Date) => BinaryFunc::Gt, 1097;
params!(Time, Time) => BinaryFunc::Gt, 1112;
params!(Timestamp, Timestamp) => BinaryFunc::Gt, 2064;
params!(TimestampTz, TimestampTz) => BinaryFunc::Gt, 1324;
params!(Uuid, Uuid) => BinaryFunc::Gt, 2975;
params!(Interval, Interval) => BinaryFunc::Gt, 1334;
params!(Bytes, Bytes) => BinaryFunc::Gt, 1959;
params!(String, String) => BinaryFunc::Gt, 666;
params!(Char, Char) => BinaryFunc::Gt, 1060;
params!(Jsonb, Jsonb) => BinaryFunc::Gt, 3243;
params!(ArrayAny, ArrayAny) => BinaryFunc::Gt => Bool, 1073;
},
">=" => Scalar {
params!(Numeric, Numeric) => BinaryFunc::Gte, 1757;
params!(Bool, Bool) => BinaryFunc::Gte, 1695;
params!(Int16, Int16) => BinaryFunc::Gte, 524;
params!(Int32, Int32) => BinaryFunc::Gte, 525;
params!(Int64, Int64) => BinaryFunc::Gte, 415;
params!(Float32, Float32) => BinaryFunc::Gte, 625;
params!(Float64, Float64) => BinaryFunc::Gte, 675;
params!(Oid, Oid) => BinaryFunc::Gte, 612;
params!(Date, Date) => BinaryFunc::Gte, 1098;
params!(Time, Time) => BinaryFunc::Gte, 1113;
params!(Timestamp, Timestamp) => BinaryFunc::Gte, 2065;
params!(TimestampTz, TimestampTz) => BinaryFunc::Gte, 1325;
params!(Uuid, Uuid) => BinaryFunc::Gte, 2977;
params!(Interval, Interval) => BinaryFunc::Gte, 1335;
params!(Bytes, Bytes) => BinaryFunc::Gte, 1960;
params!(String, String) => BinaryFunc::Gte, 667;
params!(Char, Char) => BinaryFunc::Gte, 1061;
params!(Jsonb, Jsonb) => BinaryFunc::Gte, 3245;
params!(ArrayAny, ArrayAny) => BinaryFunc::Gte => Bool, 1075;
},
// Warning! If you are writing functions here that do not simply use
        // `BinaryFunc::Eq`, you will break row equality (used e.g. in DISTINCT
// operations).
"=" => Scalar {
params!(Numeric, Numeric) => BinaryFunc::Eq, 1752;
params!(Bool, Bool) => BinaryFunc::Eq, 91;
params!(Int16, Int16) => BinaryFunc::Eq, 94;
params!(Int32, Int32) => BinaryFunc::Eq, 96;
params!(Int64, Int64) => BinaryFunc::Eq, 410;
params!(Float32, Float32) => BinaryFunc::Eq, 620;
params!(Float64, Float64) => BinaryFunc::Eq, 670;
params!(Oid, Oid) => BinaryFunc::Eq, 607;
params!(Date, Date) => BinaryFunc::Eq, 1093;
params!(Time, Time) => BinaryFunc::Eq, 1108;
params!(Timestamp, Timestamp) => BinaryFunc::Eq, 2060;
params!(TimestampTz, TimestampTz) => BinaryFunc::Eq, 1320;
params!(Uuid, Uuid) => BinaryFunc::Eq, 2972;
params!(Interval, Interval) => BinaryFunc::Eq, 1330;
params!(Bytes, Bytes) => BinaryFunc::Eq, 1955;
params!(String, String) => BinaryFunc::Eq, 98;
params!(Char, Char) => BinaryFunc::Eq, 1054;
params!(Jsonb, Jsonb) => BinaryFunc::Eq, 3240;
params!(ListAny, ListAny) => BinaryFunc::Eq => Bool, oid::FUNC_LIST_EQ_OID;
params!(ArrayAny, ArrayAny) => BinaryFunc::Eq => Bool, 1070;
},
"<>" => Scalar {
params!(Numeric, Numeric) => BinaryFunc::NotEq, 1753;
params!(Bool, Bool) => BinaryFunc::NotEq, 85;
params!(Int16, Int16) => BinaryFunc::NotEq, 519;
params!(Int32, Int32) => BinaryFunc::NotEq, 518;
params!(Int64, Int64) => BinaryFunc::NotEq, 411;
params!(Float32, Float32) => BinaryFunc::NotEq, 621;
params!(Float64, Float64) => BinaryFunc::NotEq, 671;
params!(Oid, Oid) => BinaryFunc::NotEq, 608;
params!(Date, Date) => BinaryFunc::NotEq, 1094;
params!(Time, Time) => BinaryFunc::NotEq, 1109;
params!(Timestamp, Timestamp) => BinaryFunc::NotEq, 2061;
params!(TimestampTz, TimestampTz) => BinaryFunc::NotEq, 1321;
params!(Uuid, Uuid) => BinaryFunc::NotEq, 2973;
params!(Interval, Interval) => BinaryFunc::NotEq, 1331;
params!(Bytes, Bytes) => BinaryFunc::NotEq, 1956;
params!(String, String) => BinaryFunc::NotEq, 531;
params!(Char, Char) => BinaryFunc::NotEq, 1057;
params!(Jsonb, Jsonb) => BinaryFunc::NotEq, 3241;
params!(ArrayAny, ArrayAny) => BinaryFunc::NotEq => Bool, 1071;
}
}
};
}
/// Resolves the operator to a set of function implementations.
pub fn resolve_op(op: &str) -> Result<&'static [FuncImpl<HirScalarExpr>], PlanError> {
match OP_IMPLS.get(op) {
Some(Func::Scalar(impls)) => Ok(&impls),
Some(_) => unreachable!("all operators must be scalar functions"),
// TODO: these require sql arrays
// JsonContainsAnyFields
// JsonContainsAllFields
// TODO: these require json paths
// JsonGetPath
// JsonGetPathAsText
// JsonDeletePath
// JsonContainsPath
// JsonApplyPathPredicate
None => bail_unsupported!(format!("[{}]", op)),
}
}
| 45.194766 | 167 | 0.525877 |
56959dae8fcc1fa6aeadeadf3193047f2a154d40 | 9,590 | /*
* Onshape REST API
*
* The Onshape REST API consumed by all clients.
*
* The version of the OpenAPI document: 1.104
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BtDocumentProcessingInfo {
#[serde(rename = "treeHref", skip_serializing_if = "Option::is_none")]
pub tree_href: Option<String>,
#[serde(rename = "isMutable", skip_serializing_if = "Option::is_none")]
pub is_mutable: Option<bool>,
#[serde(rename = "resourceType", skip_serializing_if = "Option::is_none")]
pub resource_type: Option<String>,
#[serde(rename = "description", skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "modifiedAt", skip_serializing_if = "Option::is_none")]
pub modified_at: Option<String>,
#[serde(rename = "createdAt", skip_serializing_if = "Option::is_none")]
pub created_at: Option<String>,
#[serde(rename = "createdBy", skip_serializing_if = "Option::is_none")]
pub created_by: Option<crate::models::BtUserBasicSummaryInfo>,
#[serde(rename = "modifiedBy", skip_serializing_if = "Option::is_none")]
pub modified_by: Option<crate::models::BtUserBasicSummaryInfo>,
#[serde(rename = "projectId", skip_serializing_if = "Option::is_none")]
pub project_id: Option<String>,
#[serde(rename = "canMove", skip_serializing_if = "Option::is_none")]
pub can_move: Option<bool>,
#[serde(rename = "isContainer", skip_serializing_if = "Option::is_none")]
pub is_container: Option<bool>,
#[serde(rename = "isEnterpriseOwned", skip_serializing_if = "Option::is_none")]
pub is_enterprise_owned: Option<bool>,
#[serde(rename = "hasPendingOwner", skip_serializing_if = "Option::is_none")]
pub has_pending_owner: Option<bool>,
#[serde(rename = "owner", skip_serializing_if = "Option::is_none")]
pub owner: Option<crate::models::BtOwnerInfo>,
#[serde(rename = "href", skip_serializing_if = "Option::is_none")]
pub href: Option<String>,
#[serde(rename = "viewRef", skip_serializing_if = "Option::is_none")]
pub view_ref: Option<String>,
#[serde(rename = "name", skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "id", skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(rename = "defaultElementId", skip_serializing_if = "Option::is_none")]
pub default_element_id: Option<String>,
#[serde(rename = "defaultWorkspace", skip_serializing_if = "Option::is_none")]
pub default_workspace: Option<crate::models::BtBaseInfo>,
#[serde(rename = "parentId", skip_serializing_if = "Option::is_none")]
pub parent_id: Option<String>,
#[serde(rename = "permissionSet", skip_serializing_if = "Option::is_none")]
pub permission_set: Option<serde_json::Value>,
#[serde(rename = "trash", skip_serializing_if = "Option::is_none")]
pub trash: Option<bool>,
#[serde(rename = "totalWorkspacesUpdating", skip_serializing_if = "Option::is_none")]
pub total_workspaces_updating: Option<i32>,
#[serde(rename = "totalWorkspacesScheduledForUpdate", skip_serializing_if = "Option::is_none")]
pub total_workspaces_scheduled_for_update: Option<i32>,
#[serde(rename = "canUnshare", skip_serializing_if = "Option::is_none")]
pub can_unshare: Option<bool>,
#[serde(rename = "thumbnail", skip_serializing_if = "Option::is_none")]
pub thumbnail: Option<crate::models::BtThumbnailInfo>,
#[serde(rename = "supportTeamUserAndShared", skip_serializing_if = "Option::is_none")]
pub support_team_user_and_shared: Option<bool>,
#[serde(rename = "likedByCurrentUser", skip_serializing_if = "Option::is_none")]
pub liked_by_current_user: Option<bool>,
#[serde(rename = "documentLabels", skip_serializing_if = "Option::is_none")]
pub document_labels: Option<Vec<crate::models::BtDocumentLabelInfo>>,
#[serde(rename = "numberOfTimesReferenced", skip_serializing_if = "Option::is_none")]
pub number_of_times_referenced: Option<i64>,
#[serde(rename = "numberOfTimesCopied", skip_serializing_if = "Option::is_none")]
pub number_of_times_copied: Option<i64>,
#[serde(rename = "likes", skip_serializing_if = "Option::is_none")]
pub likes: Option<i64>,
#[serde(rename = "recentVersion", skip_serializing_if = "Option::is_none")]
pub recent_version: Option<crate::models::BtBaseInfo>,
#[serde(rename = "hasRelevantInsertables", skip_serializing_if = "Option::is_none")]
pub has_relevant_insertables: Option<bool>,
#[serde(rename = "createdWithEducationPlan", skip_serializing_if = "Option::is_none")]
pub created_with_education_plan: Option<bool>,
#[serde(rename = "notRevisionManaged", skip_serializing_if = "Option::is_none")]
pub not_revision_managed: Option<bool>,
#[serde(rename = "anonymousAccessAllowed", skip_serializing_if = "Option::is_none")]
pub anonymous_access_allowed: Option<bool>,
#[serde(rename = "anonymousAllowsExport", skip_serializing_if = "Option::is_none")]
pub anonymous_allows_export: Option<bool>,
#[serde(rename = "tags", skip_serializing_if = "Option::is_none")]
pub tags: Option<Vec<String>>,
#[serde(rename = "trashedAt", skip_serializing_if = "Option::is_none")]
pub trashed_at: Option<String>,
#[serde(rename = "isOrphaned", skip_serializing_if = "Option::is_none")]
pub is_orphaned: Option<bool>,
#[serde(rename = "public", skip_serializing_if = "Option::is_none")]
pub public: Option<bool>,
#[serde(rename = "userAccountLimitsBreached", skip_serializing_if = "Option::is_none")]
pub user_account_limits_breached: Option<bool>,
#[serde(rename = "isUsingManagedWorkflow", skip_serializing_if = "Option::is_none")]
pub is_using_managed_workflow: Option<bool>,
#[serde(rename = "permission", skip_serializing_if = "Option::is_none")]
pub permission: Option<Permission>,
#[serde(rename = "hasReleaseRevisionableObjects", skip_serializing_if = "Option::is_none")]
pub has_release_revisionable_objects: Option<bool>,
#[serde(rename = "documentThumbnailElementId", skip_serializing_if = "Option::is_none")]
pub document_thumbnail_element_id: Option<String>,
#[serde(rename = "duplicateNameViolationError", skip_serializing_if = "Option::is_none")]
pub duplicate_name_violation_error: Option<String>,
#[serde(rename = "betaCapabilityIds", skip_serializing_if = "Option::is_none")]
pub beta_capability_ids: Option<Vec<String>>,
#[serde(rename = "isUpgradedToLatestVersion", skip_serializing_if = "Option::is_none")]
pub is_upgraded_to_latest_version: Option<bool>,
#[serde(rename = "translationId", skip_serializing_if = "Option::is_none")]
pub translation_id: Option<String>,
#[serde(rename = "translationEventKey", skip_serializing_if = "Option::is_none")]
pub translation_event_key: Option<String>,
}
impl BtDocumentProcessingInfo {
pub fn new(json_type: String) -> BtDocumentProcessingInfo {
BtDocumentProcessingInfo {
tree_href: None,
is_mutable: None,
resource_type: None,
description: None,
modified_at: None,
created_at: None,
created_by: None,
modified_by: None,
project_id: None,
can_move: None,
is_container: None,
is_enterprise_owned: None,
has_pending_owner: None,
owner: None,
href: None,
view_ref: None,
name: None,
id: None,
default_element_id: None,
default_workspace: None,
parent_id: None,
permission_set: None,
trash: None,
total_workspaces_updating: None,
total_workspaces_scheduled_for_update: None,
can_unshare: None,
thumbnail: None,
support_team_user_and_shared: None,
liked_by_current_user: None,
document_labels: None,
number_of_times_referenced: None,
number_of_times_copied: None,
likes: None,
recent_version: None,
has_relevant_insertables: None,
created_with_education_plan: None,
not_revision_managed: None,
anonymous_access_allowed: None,
anonymous_allows_export: None,
tags: None,
trashed_at: None,
is_orphaned: None,
public: None,
user_account_limits_breached: None,
is_using_managed_workflow: None,
permission: None,
has_release_revisionable_objects: None,
document_thumbnail_element_id: None,
duplicate_name_violation_error: None,
beta_capability_ids: None,
is_upgraded_to_latest_version: None,
translation_id: None,
translation_event_key: None,
}
}
}
///
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Permission {
#[serde(rename = "NOACCESS")]
NOACCESS,
#[serde(rename = "ANONYMOUS_ACCESS")]
ANONYMOUSACCESS,
#[serde(rename = "READ")]
READ,
#[serde(rename = "READ_COPY_EXPORT")]
READCOPYEXPORT,
#[serde(rename = "COMMENT")]
COMMENT,
#[serde(rename = "WRITE")]
WRITE,
#[serde(rename = "RESHARE")]
RESHARE,
#[serde(rename = "FULL")]
FULL,
#[serde(rename = "OWNER")]
OWNER,
}
| 46.328502 | 99 | 0.676017 |
01d84e287bc9fbf788446269c646562b3cf8086a | 37,806 | ///////////////////////////////////////////////////////////////////////////
// # Type combining
//
// There are four type combiners: equate, sub, lub, and glb. Each
// implements the trait `Combine` and contains methods for combining
// two instances of various things and yielding a new instance. These
// combiner methods always yield a `Result<T>`. There is a lot of
// common code for these operations, implemented as default methods on
// the `Combine` trait.
//
// Each operation may have side-effects on the inference context,
// though these can be unrolled using snapshots. On success, the
// LUB/GLB operations return the appropriate bound. The Eq and Sub
// operations generally return the first operand.
//
// ## Contravariance
//
// When you are relating two things which have a contravariant
// relationship, you should use `contratys()` or `contraregions()`,
// rather than inverting the order of arguments! This is necessary
// because the order of arguments is not relevant for LUB and GLB. It
// is also useful to track which value is the "expected" value in
// terms of error reporting.
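//
// As a quick illustrative sketch (not part of the implementation), with
// `'a` and `'b` distinct free regions and `'?r` a fresh region variable:
//
//   equate(&'a i32, &'b i32)  ==> ok only if 'a and 'b can be unified
//   sub(&'a i32, &'b i32)     ==> emits the constraint 'a: 'b
//   lub(&'a i32, &'b i32)     ==> &'?r i32  with  'a: '?r  and  'b: '?r
//   glb(&'a i32, &'b i32)     ==> &'?r i32  with  '?r: 'a  and  '?r: 'b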
use super::glb::Glb;
use super::lub::Lub;
use super::sub::Sub;
use super::type_variable::TypeVariableValue;
use super::unify_key::replace_if_possible;
use super::unify_key::{ConstVarValue, ConstVariableValue};
use super::unify_key::{ConstVariableOrigin, ConstVariableOriginKind};
use super::{equate::Equate, type_variable::Diverging};
use super::{InferCtxt, MiscVariable, TypeTrace};
use crate::traits::{Obligation, PredicateObligations};
use rustc_data_structures::sso::SsoHashMap;
use rustc_hir::def_id::DefId;
use rustc_middle::traits::ObligationCause;
use rustc_middle::ty::error::TypeError;
use rustc_middle::ty::relate::{self, Relate, RelateResult, TypeRelation};
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{self, InferConst, ToPredicate, Ty, TyCtxt, TypeFoldable};
use rustc_middle::ty::{IntType, UintType};
use rustc_span::{Span, DUMMY_SP};
#[derive(Clone)]
pub struct CombineFields<'infcx, 'tcx> {
pub infcx: &'infcx InferCtxt<'infcx, 'tcx>,
pub trace: TypeTrace<'tcx>,
pub cause: Option<ty::relate::Cause>,
pub param_env: ty::ParamEnv<'tcx>,
pub obligations: PredicateObligations<'tcx>,
}
#[derive(Copy, Clone, Debug)]
pub enum RelationDir {
SubtypeOf,
SupertypeOf,
EqTo,
}
impl<'infcx, 'tcx> InferCtxt<'infcx, 'tcx> {
pub fn super_combine_tys<R>(
&self,
relation: &mut R,
a: Ty<'tcx>,
b: Ty<'tcx>,
) -> RelateResult<'tcx, Ty<'tcx>>
where
R: TypeRelation<'tcx>,
{
let a_is_expected = relation.a_is_expected();
match (a.kind(), b.kind()) {
// Relate integral variables to other types
(&ty::Infer(ty::IntVar(a_id)), &ty::Infer(ty::IntVar(b_id))) => {
self.inner
.borrow_mut()
.int_unification_table()
.unify_var_var(a_id, b_id)
.map_err(|e| int_unification_error(a_is_expected, e))?;
Ok(a)
}
(&ty::Infer(ty::IntVar(v_id)), &ty::Int(v)) => {
self.unify_integral_variable(a_is_expected, v_id, IntType(v))
}
(&ty::Int(v), &ty::Infer(ty::IntVar(v_id))) => {
self.unify_integral_variable(!a_is_expected, v_id, IntType(v))
}
(&ty::Infer(ty::IntVar(v_id)), &ty::Uint(v)) => {
self.unify_integral_variable(a_is_expected, v_id, UintType(v))
}
(&ty::Uint(v), &ty::Infer(ty::IntVar(v_id))) => {
self.unify_integral_variable(!a_is_expected, v_id, UintType(v))
}
// Relate floating-point variables to other types
(&ty::Infer(ty::FloatVar(a_id)), &ty::Infer(ty::FloatVar(b_id))) => {
self.inner
.borrow_mut()
.float_unification_table()
.unify_var_var(a_id, b_id)
.map_err(|e| float_unification_error(relation.a_is_expected(), e))?;
Ok(a)
}
(&ty::Infer(ty::FloatVar(v_id)), &ty::Float(v)) => {
self.unify_float_variable(a_is_expected, v_id, v)
}
(&ty::Float(v), &ty::Infer(ty::FloatVar(v_id))) => {
self.unify_float_variable(!a_is_expected, v_id, v)
}
// All other cases of inference are errors
(&ty::Infer(_), _) | (_, &ty::Infer(_)) => {
Err(TypeError::Sorts(ty::relate::expected_found(relation, a, b)))
}
_ => ty::relate::super_relate_tys(relation, a, b),
}
}
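    // Sketch of the dispatch above: relating an unconstrained integer
    // inference variable `?i` with `u32` goes through
    // `unify_integral_variable` and yields `u32`; relating two
    // unconstrained int variables merely unions them in the unification
    // table and returns the first operand; any remaining pairing that
    // still involves `ty::Infer` is reported as a sort mismatch.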
pub fn super_combine_consts<R>(
&self,
relation: &mut R,
a: &'tcx ty::Const<'tcx>,
b: &'tcx ty::Const<'tcx>,
) -> RelateResult<'tcx, &'tcx ty::Const<'tcx>>
where
R: ConstEquateRelation<'tcx>,
{
debug!("{}.consts({:?}, {:?})", relation.tag(), a, b);
if a == b {
return Ok(a);
}
let a = replace_if_possible(&mut self.inner.borrow_mut().const_unification_table(), a);
let b = replace_if_possible(&mut self.inner.borrow_mut().const_unification_table(), b);
let a_is_expected = relation.a_is_expected();
match (a.val, b.val) {
(
ty::ConstKind::Infer(InferConst::Var(a_vid)),
ty::ConstKind::Infer(InferConst::Var(b_vid)),
) => {
self.inner
.borrow_mut()
.const_unification_table()
.unify_var_var(a_vid, b_vid)
.map_err(|e| const_unification_error(a_is_expected, e))?;
return Ok(a);
}
// All other cases of inference with other variables are errors.
(ty::ConstKind::Infer(InferConst::Var(_)), ty::ConstKind::Infer(_))
| (ty::ConstKind::Infer(_), ty::ConstKind::Infer(InferConst::Var(_))) => {
bug!("tried to combine ConstKind::Infer/ConstKind::Infer(InferConst::Var)")
}
(ty::ConstKind::Infer(InferConst::Var(vid)), _) => {
return self.unify_const_variable(relation.param_env(), vid, b, a_is_expected);
}
(_, ty::ConstKind::Infer(InferConst::Var(vid))) => {
return self.unify_const_variable(relation.param_env(), vid, a, !a_is_expected);
}
(ty::ConstKind::Unevaluated(..), _) if self.tcx.lazy_normalization() => {
// FIXME(#59490): Need to remove the leak check to accommodate
// escaping bound variables here.
if !a.has_escaping_bound_vars() && !b.has_escaping_bound_vars() {
relation.const_equate_obligation(a, b);
}
return Ok(b);
}
(_, ty::ConstKind::Unevaluated(..)) if self.tcx.lazy_normalization() => {
// FIXME(#59490): Need to remove the leak check to accommodate
// escaping bound variables here.
if !a.has_escaping_bound_vars() && !b.has_escaping_bound_vars() {
relation.const_equate_obligation(a, b);
}
return Ok(a);
}
_ => {}
}
ty::relate::super_relate_consts(relation, a, b)
}
/// Unifies the const variable `target_vid` with the given constant.
///
/// This also tests if the given const `ct` contains an inference variable which was previously
/// unioned with `target_vid`. If this is the case, inferring `target_vid` to `ct`
/// would result in an infinite type as we continuously replace an inference variable
/// in `ct` with `ct` itself.
///
    /// This is especially important as unevaluated consts use their parent's generics.
/// They therefore often contain unused substs, making these errors far more likely.
///
/// A good example of this is the following:
///
/// ```rust
/// #![feature(const_generics)]
///
/// fn bind<const N: usize>(value: [u8; N]) -> [u8; 3 + 4] {
/// todo!()
/// }
///
/// fn main() {
/// let mut arr = Default::default();
/// arr = bind(arr);
/// }
/// ```
///
/// Here `3 + 4` ends up as `ConstKind::Unevaluated` which uses the generics
/// of `fn bind` (meaning that its substs contain `N`).
///
/// `bind(arr)` now infers that the type of `arr` must be `[u8; N]`.
/// The assignment `arr = bind(arr)` now tries to equate `N` with `3 + 4`.
///
/// As `3 + 4` contains `N` in its substs, this must not succeed.
///
/// See `src/test/ui/const-generics/occurs-check/` for more examples where this is relevant.
#[instrument(level = "debug", skip(self))]
fn unify_const_variable(
&self,
param_env: ty::ParamEnv<'tcx>,
target_vid: ty::ConstVid<'tcx>,
ct: &'tcx ty::Const<'tcx>,
vid_is_expected: bool,
) -> RelateResult<'tcx, &'tcx ty::Const<'tcx>> {
let (for_universe, span) = {
let mut inner = self.inner.borrow_mut();
let variable_table = &mut inner.const_unification_table();
let var_value = variable_table.probe_value(target_vid);
match var_value.val {
ConstVariableValue::Known { value } => {
bug!("instantiating {:?} which has a known value {:?}", target_vid, value)
}
ConstVariableValue::Unknown { universe } => (universe, var_value.origin.span),
}
};
let value = ConstInferUnifier { infcx: self, span, param_env, for_universe, target_vid }
.relate(ct, ct)?;
self.inner
.borrow_mut()
.const_unification_table()
.unify_var_value(
target_vid,
ConstVarValue {
origin: ConstVariableOrigin {
kind: ConstVariableOriginKind::ConstInference,
span: DUMMY_SP,
},
val: ConstVariableValue::Known { value },
},
)
.map(|()| value)
.map_err(|e| const_unification_error(vid_is_expected, e))
}
fn unify_integral_variable(
&self,
vid_is_expected: bool,
vid: ty::IntVid,
val: ty::IntVarValue,
) -> RelateResult<'tcx, Ty<'tcx>> {
self.inner
.borrow_mut()
.int_unification_table()
.unify_var_value(vid, Some(val))
.map_err(|e| int_unification_error(vid_is_expected, e))?;
match val {
IntType(v) => Ok(self.tcx.mk_mach_int(v)),
UintType(v) => Ok(self.tcx.mk_mach_uint(v)),
}
}
fn unify_float_variable(
&self,
vid_is_expected: bool,
vid: ty::FloatVid,
val: ty::FloatTy,
) -> RelateResult<'tcx, Ty<'tcx>> {
self.inner
.borrow_mut()
.float_unification_table()
.unify_var_value(vid, Some(ty::FloatVarValue(val)))
.map_err(|e| float_unification_error(vid_is_expected, e))?;
Ok(self.tcx.mk_mach_float(val))
}
}
impl<'infcx, 'tcx> CombineFields<'infcx, 'tcx> {
pub fn tcx(&self) -> TyCtxt<'tcx> {
self.infcx.tcx
}
pub fn equate<'a>(&'a mut self, a_is_expected: bool) -> Equate<'a, 'infcx, 'tcx> {
Equate::new(self, a_is_expected)
}
pub fn sub<'a>(&'a mut self, a_is_expected: bool) -> Sub<'a, 'infcx, 'tcx> {
Sub::new(self, a_is_expected)
}
pub fn lub<'a>(&'a mut self, a_is_expected: bool) -> Lub<'a, 'infcx, 'tcx> {
Lub::new(self, a_is_expected)
}
pub fn glb<'a>(&'a mut self, a_is_expected: bool) -> Glb<'a, 'infcx, 'tcx> {
Glb::new(self, a_is_expected)
}
/// Here, `dir` is either `EqTo`, `SubtypeOf`, or `SupertypeOf`.
/// The idea is that we should ensure that the type `a_ty` is equal
/// to, a subtype of, or a supertype of (respectively) the type
/// to which `b_vid` is bound.
///
/// Since `b_vid` has not yet been instantiated with a type, we
/// will first instantiate `b_vid` with a *generalized* version
/// of `a_ty`. Generalization introduces other inference
/// variables wherever subtyping could occur.
pub fn instantiate(
&mut self,
a_ty: Ty<'tcx>,
dir: RelationDir,
b_vid: ty::TyVid,
a_is_expected: bool,
) -> RelateResult<'tcx, ()> {
use self::RelationDir::*;
// Get the actual variable that b_vid has been inferred to
debug_assert!(self.infcx.inner.borrow_mut().type_variables().probe(b_vid).is_unknown());
debug!("instantiate(a_ty={:?} dir={:?} b_vid={:?})", a_ty, dir, b_vid);
// Generalize type of `a_ty` appropriately depending on the
// direction. As an example, assume:
//
// - `a_ty == &'x ?1`, where `'x` is some free region and `?1` is an
// inference variable,
// - and `dir` == `SubtypeOf`.
//
// Then the generalized form `b_ty` would be `&'?2 ?3`, where
// `'?2` and `?3` are fresh region/type inference
// variables. (Down below, we will relate `a_ty <: b_ty`,
// adding constraints like `'x: '?2` and `?1 <: ?3`.)
let Generalization { ty: b_ty, needs_wf } = self.generalize(a_ty, b_vid, dir)?;
debug!(
"instantiate(a_ty={:?}, dir={:?}, b_vid={:?}, generalized b_ty={:?})",
a_ty, dir, b_vid, b_ty
);
self.infcx.inner.borrow_mut().type_variables().instantiate(b_vid, b_ty);
if needs_wf {
self.obligations.push(Obligation::new(
self.trace.cause.clone(),
self.param_env,
ty::PredicateKind::WellFormed(b_ty.into()).to_predicate(self.infcx.tcx),
));
}
// Finally, relate `b_ty` to `a_ty`, as described in previous comment.
//
// FIXME(#16847): This code is non-ideal because all these subtype
// relations wind up attributed to the same spans. We need
// to associate causes/spans with each of the relations in
// the stack to get this right.
match dir {
EqTo => self.equate(a_is_expected).relate(a_ty, b_ty),
SubtypeOf => self.sub(a_is_expected).relate(a_ty, b_ty),
SupertypeOf => self.sub(a_is_expected).relate_with_variance(
ty::Contravariant,
ty::VarianceDiagInfo::default(),
a_ty,
b_ty,
),
}?;
Ok(())
}
/// Attempts to generalize `ty` for the type variable `for_vid`.
    /// This checks for cycles -- that is, whether the type `ty`
    /// references `for_vid`. The `dir` is the "direction" for which we
    /// are performing the generalization (i.e., are we producing a type
    /// that can be used as a supertype, etc.).
///
/// Preconditions:
///
/// - `for_vid` is a "root vid"
fn generalize(
&self,
ty: Ty<'tcx>,
for_vid: ty::TyVid,
dir: RelationDir,
) -> RelateResult<'tcx, Generalization<'tcx>> {
debug!("generalize(ty={:?}, for_vid={:?}, dir={:?}", ty, for_vid, dir);
// Determine the ambient variance within which `ty` appears.
// The surrounding equation is:
//
// ty [op] ty2
//
// where `op` is either `==`, `<:`, or `:>`. This maps quite
// naturally.
let ambient_variance = match dir {
RelationDir::EqTo => ty::Invariant,
RelationDir::SubtypeOf => ty::Covariant,
RelationDir::SupertypeOf => ty::Contravariant,
};
debug!("generalize: ambient_variance = {:?}", ambient_variance);
let for_universe = match self.infcx.inner.borrow_mut().type_variables().probe(for_vid) {
v @ TypeVariableValue::Known { .. } => {
bug!("instantiating {:?} which has a known value {:?}", for_vid, v,)
}
TypeVariableValue::Unknown { universe } => universe,
};
debug!("generalize: for_universe = {:?}", for_universe);
debug!("generalize: trace = {:?}", self.trace);
let mut generalize = Generalizer {
infcx: self.infcx,
cause: &self.trace.cause,
for_vid_sub_root: self.infcx.inner.borrow_mut().type_variables().sub_root_var(for_vid),
for_universe,
ambient_variance,
needs_wf: false,
root_ty: ty,
param_env: self.param_env,
cache: SsoHashMap::new(),
};
let ty = match generalize.relate(ty, ty) {
Ok(ty) => ty,
Err(e) => {
debug!("generalize: failure {:?}", e);
return Err(e);
}
};
let needs_wf = generalize.needs_wf;
debug!("generalize: success {{ {:?}, {:?} }}", ty, needs_wf);
Ok(Generalization { ty, needs_wf })
}
pub fn add_const_equate_obligation(
&mut self,
a_is_expected: bool,
a: &'tcx ty::Const<'tcx>,
b: &'tcx ty::Const<'tcx>,
) {
let predicate = if a_is_expected {
ty::PredicateKind::ConstEquate(a, b)
} else {
ty::PredicateKind::ConstEquate(b, a)
};
self.obligations.push(Obligation::new(
self.trace.cause.clone(),
self.param_env,
predicate.to_predicate(self.tcx()),
));
}
}
struct Generalizer<'cx, 'tcx> {
infcx: &'cx InferCtxt<'cx, 'tcx>,
/// The span, used when creating new type variables and things.
cause: &'cx ObligationCause<'tcx>,
/// The vid of the type variable that is in the process of being
/// instantiated; if we find this within the type we are folding,
/// that means we would have created a cyclic type.
for_vid_sub_root: ty::TyVid,
/// The universe of the type variable that is in the process of
/// being instantiated. Any fresh variables that we create in this
/// process should be in that same universe.
for_universe: ty::UniverseIndex,
/// Track the variance as we descend into the type.
ambient_variance: ty::Variance,
/// See the field `needs_wf` in `Generalization`.
needs_wf: bool,
/// The root type that we are generalizing. Used when reporting cycles.
root_ty: Ty<'tcx>,
param_env: ty::ParamEnv<'tcx>,
cache: SsoHashMap<Ty<'tcx>, RelateResult<'tcx, Ty<'tcx>>>,
}
/// Result from a generalization operation. This includes
/// not only the generalized type, but also a bool flag
/// indicating whether further WF checks are needed.
struct Generalization<'tcx> {
ty: Ty<'tcx>,
/// If true, then the generalized type may not be well-formed,
/// even if the source type is well-formed, so we should add an
/// additional check to enforce that it is. This arises in
/// particular around 'bivariant' type parameters that are only
/// constrained by a where-clause. As an example, imagine a type:
///
/// struct Foo<A, B> where A: Iterator<Item = B> {
/// data: A
/// }
///
/// here, `A` will be covariant, but `B` is
/// unconstrained. However, whatever it is, for `Foo` to be WF, it
/// must be equal to `A::Item`. If we have an input `Foo<?A, ?B>`,
/// then after generalization we will wind up with a type like
/// `Foo<?C, ?D>`. When we enforce that `Foo<?A, ?B> <: Foo<?C,
/// ?D>` (or `>:`), we will wind up with the requirement that `?A
/// <: ?C`, but no particular relationship between `?B` and `?D`
/// (after all, we do not know the variance of the normalized form
/// of `A::Item` with respect to `A`). If we do nothing else, this
/// may mean that `?D` goes unconstrained (as in #41677). So, in
/// this scenario where we create a new type variable in a
/// bivariant context, we set the `needs_wf` flag to true. This
/// will force the calling code to check that `WF(Foo<?C, ?D>)`
/// holds, which in turn implies that `?C::Item == ?D`. So once
/// `?C` is constrained, that should suffice to restrict `?D`.
needs_wf: bool,
}
impl TypeRelation<'tcx> for Generalizer<'_, 'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.infcx.tcx
}
fn param_env(&self) -> ty::ParamEnv<'tcx> {
self.param_env
}
fn tag(&self) -> &'static str {
"Generalizer"
}
fn a_is_expected(&self) -> bool {
true
}
fn binders<T>(
&mut self,
a: ty::Binder<'tcx, T>,
b: ty::Binder<'tcx, T>,
) -> RelateResult<'tcx, ty::Binder<'tcx, T>>
where
T: Relate<'tcx>,
{
Ok(a.rebind(self.relate(a.skip_binder(), b.skip_binder())?))
}
fn relate_item_substs(
&mut self,
item_def_id: DefId,
a_subst: SubstsRef<'tcx>,
b_subst: SubstsRef<'tcx>,
) -> RelateResult<'tcx, SubstsRef<'tcx>> {
if self.ambient_variance == ty::Variance::Invariant {
// Avoid fetching the variance if we are in an invariant
// context; no need, and it can induce dependency cycles
// (e.g., #41849).
relate::relate_substs(self, None, a_subst, b_subst)
} else {
let opt_variances = self.tcx().variances_of(item_def_id);
relate::relate_substs(self, Some(&opt_variances), a_subst, b_subst)
}
}
fn relate_with_variance<T: Relate<'tcx>>(
&mut self,
variance: ty::Variance,
_info: ty::VarianceDiagInfo<'tcx>,
a: T,
b: T,
) -> RelateResult<'tcx, T> {
let old_ambient_variance = self.ambient_variance;
self.ambient_variance = self.ambient_variance.xform(variance);
let result = self.relate(a, b);
self.ambient_variance = old_ambient_variance;
result
}
fn tys(&mut self, t: Ty<'tcx>, t2: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
assert_eq!(t, t2); // we are abusing TypeRelation here; both LHS and RHS ought to be ==
if let Some(result) = self.cache.get(&t) {
return result.clone();
}
debug!("generalize: t={:?}", t);
// Check to see whether the type we are generalizing references
// any other type variable related to `vid` via
// subtyping. This is basically our "occurs check", preventing
// us from creating infinitely sized types.
let result = match *t.kind() {
ty::Infer(ty::TyVar(vid)) => {
let vid = self.infcx.inner.borrow_mut().type_variables().root_var(vid);
let sub_vid = self.infcx.inner.borrow_mut().type_variables().sub_root_var(vid);
if sub_vid == self.for_vid_sub_root {
// If sub-roots are equal, then `for_vid` and
// `vid` are related via subtyping.
Err(TypeError::CyclicTy(self.root_ty))
} else {
let probe = self.infcx.inner.borrow_mut().type_variables().probe(vid);
match probe {
TypeVariableValue::Known { value: u } => {
debug!("generalize: known value {:?}", u);
self.relate(u, u)
}
TypeVariableValue::Unknown { universe } => {
match self.ambient_variance {
// Invariant: no need to make a fresh type variable.
ty::Invariant => {
if self.for_universe.can_name(universe) {
return Ok(t);
}
}
// Bivariant: make a fresh var, but we
// may need a WF predicate. See
// comment on `needs_wf` field for
// more info.
ty::Bivariant => self.needs_wf = true,
// Co/contravariant: this will be
// sufficiently constrained later on.
ty::Covariant | ty::Contravariant => (),
}
let origin =
*self.infcx.inner.borrow_mut().type_variables().var_origin(vid);
let new_var_id = self
.infcx
.inner
.borrow_mut()
.type_variables()
.new_var(self.for_universe, Diverging::NotDiverging, origin);
let u = self.tcx().mk_ty_var(new_var_id);
// Record that we replaced `vid` with `new_var_id` as part of a generalization
// operation. This is needed to detect cyclic types. To see why, see the
// docs in the `type_variables` module.
self.infcx.inner.borrow_mut().type_variables().sub(vid, new_var_id);
debug!("generalize: replacing original vid={:?} with new={:?}", vid, u);
Ok(u)
}
}
}
}
ty::Infer(ty::IntVar(_) | ty::FloatVar(_)) => {
// No matter what mode we are in,
// integer/floating-point types must be equal to be
// relatable.
Ok(t)
}
_ => relate::super_relate_tys(self, t, t),
};
self.cache.insert(t, result.clone());
return result;
}
fn regions(
&mut self,
r: ty::Region<'tcx>,
r2: ty::Region<'tcx>,
) -> RelateResult<'tcx, ty::Region<'tcx>> {
assert_eq!(r, r2); // we are abusing TypeRelation here; both LHS and RHS ought to be ==
debug!("generalize: regions r={:?}", r);
match *r {
// Never make variables for regions bound within the type itself,
// nor for erased regions.
ty::ReLateBound(..) | ty::ReErased => {
return Ok(r);
}
ty::RePlaceholder(..)
| ty::ReVar(..)
| ty::ReEmpty(_)
| ty::ReStatic
| ty::ReEarlyBound(..)
| ty::ReFree(..) => {
// see common code below
}
}
// If we are in an invariant context, we can re-use the region
// as is, unless it happens to be in some universe that we
// can't name. (In the case of a region *variable*, we could
// use it if we promoted it into our universe, but we don't
// bother.)
if let ty::Invariant = self.ambient_variance {
let r_universe = self.infcx.universe_of_region(r);
if self.for_universe.can_name(r_universe) {
return Ok(r);
}
}
// FIXME: This is non-ideal because we don't give a
// very descriptive origin for this region variable.
Ok(self.infcx.next_region_var_in_universe(MiscVariable(self.cause.span), self.for_universe))
}
fn consts(
&mut self,
c: &'tcx ty::Const<'tcx>,
c2: &'tcx ty::Const<'tcx>,
) -> RelateResult<'tcx, &'tcx ty::Const<'tcx>> {
assert_eq!(c, c2); // we are abusing TypeRelation here; both LHS and RHS ought to be ==
match c.val {
ty::ConstKind::Infer(InferConst::Var(vid)) => {
let mut inner = self.infcx.inner.borrow_mut();
let variable_table = &mut inner.const_unification_table();
let var_value = variable_table.probe_value(vid);
match var_value.val {
ConstVariableValue::Known { value: u } => {
drop(inner);
self.relate(u, u)
}
ConstVariableValue::Unknown { universe } => {
if self.for_universe.can_name(universe) {
Ok(c)
} else {
let new_var_id = variable_table.new_key(ConstVarValue {
origin: var_value.origin,
val: ConstVariableValue::Unknown { universe: self.for_universe },
});
Ok(self.tcx().mk_const_var(new_var_id, c.ty))
}
}
}
}
ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted })
if self.tcx().lazy_normalization() =>
{
assert_eq!(promoted, None);
let substs = self.relate_with_variance(
ty::Variance::Invariant,
ty::VarianceDiagInfo::default(),
substs,
substs,
)?;
Ok(self.tcx().mk_const(ty::Const {
ty: c.ty,
val: ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted }),
}))
}
_ => relate::super_relate_consts(self, c, c),
}
}
}
pub trait ConstEquateRelation<'tcx>: TypeRelation<'tcx> {
/// Register an obligation that both constants must be equal to each other.
///
/// If they aren't equal then the relation doesn't hold.
fn const_equate_obligation(&mut self, a: &'tcx ty::Const<'tcx>, b: &'tcx ty::Const<'tcx>);
}
pub trait RelateResultCompare<'tcx, T> {
fn compare<F>(&self, t: T, f: F) -> RelateResult<'tcx, T>
where
F: FnOnce() -> TypeError<'tcx>;
}
impl<'tcx, T: Clone + PartialEq> RelateResultCompare<'tcx, T> for RelateResult<'tcx, T> {
fn compare<F>(&self, t: T, f: F) -> RelateResult<'tcx, T>
where
F: FnOnce() -> TypeError<'tcx>,
{
self.clone().and_then(|s| if s == t { self.clone() } else { Err(f()) })
}
}
pub fn const_unification_error<'tcx>(
a_is_expected: bool,
(a, b): (&'tcx ty::Const<'tcx>, &'tcx ty::Const<'tcx>),
) -> TypeError<'tcx> {
TypeError::ConstMismatch(ty::relate::expected_found_bool(a_is_expected, a, b))
}
fn int_unification_error<'tcx>(
a_is_expected: bool,
v: (ty::IntVarValue, ty::IntVarValue),
) -> TypeError<'tcx> {
let (a, b) = v;
TypeError::IntMismatch(ty::relate::expected_found_bool(a_is_expected, a, b))
}
fn float_unification_error<'tcx>(
a_is_expected: bool,
v: (ty::FloatVarValue, ty::FloatVarValue),
) -> TypeError<'tcx> {
let (ty::FloatVarValue(a), ty::FloatVarValue(b)) = v;
TypeError::FloatMismatch(ty::relate::expected_found_bool(a_is_expected, a, b))
}
struct ConstInferUnifier<'cx, 'tcx> {
infcx: &'cx InferCtxt<'cx, 'tcx>,
span: Span,
param_env: ty::ParamEnv<'tcx>,
for_universe: ty::UniverseIndex,
/// The vid of the const variable that is in the process of being
/// instantiated; if we find this within the const we are folding,
/// that means we would have created a cyclic const.
target_vid: ty::ConstVid<'tcx>,
}
// We use `TypeRelation` here to propagate `RelateResult` upwards.
//
// Both inputs are expected to be the same.
impl TypeRelation<'tcx> for ConstInferUnifier<'_, 'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.infcx.tcx
}
fn param_env(&self) -> ty::ParamEnv<'tcx> {
self.param_env
}
fn tag(&self) -> &'static str {
"ConstInferUnifier"
}
fn a_is_expected(&self) -> bool {
true
}
fn relate_with_variance<T: Relate<'tcx>>(
&mut self,
_variance: ty::Variance,
_info: ty::VarianceDiagInfo<'tcx>,
a: T,
b: T,
) -> RelateResult<'tcx, T> {
// We don't care about variance here.
self.relate(a, b)
}
fn binders<T>(
&mut self,
a: ty::Binder<'tcx, T>,
b: ty::Binder<'tcx, T>,
) -> RelateResult<'tcx, ty::Binder<'tcx, T>>
where
T: Relate<'tcx>,
{
Ok(a.rebind(self.relate(a.skip_binder(), b.skip_binder())?))
}
fn tys(&mut self, t: Ty<'tcx>, _t: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
debug_assert_eq!(t, _t);
debug!("ConstInferUnifier: t={:?}", t);
match t.kind() {
&ty::Infer(ty::TyVar(vid)) => {
let vid = self.infcx.inner.borrow_mut().type_variables().root_var(vid);
let probe = self.infcx.inner.borrow_mut().type_variables().probe(vid);
match probe {
TypeVariableValue::Known { value: u } => {
debug!("ConstOccursChecker: known value {:?}", u);
self.tys(u, u)
}
TypeVariableValue::Unknown { universe } => {
if self.for_universe.can_name(universe) {
return Ok(t);
}
let origin =
*self.infcx.inner.borrow_mut().type_variables().var_origin(vid);
let new_var_id = self.infcx.inner.borrow_mut().type_variables().new_var(
self.for_universe,
Diverging::NotDiverging,
origin,
);
let u = self.tcx().mk_ty_var(new_var_id);
debug!(
"ConstInferUnifier: replacing original vid={:?} with new={:?}",
vid, u
);
Ok(u)
}
}
}
ty::Infer(ty::IntVar(_) | ty::FloatVar(_)) => Ok(t),
_ => relate::super_relate_tys(self, t, t),
}
}
fn regions(
&mut self,
r: ty::Region<'tcx>,
_r: ty::Region<'tcx>,
) -> RelateResult<'tcx, ty::Region<'tcx>> {
debug_assert_eq!(r, _r);
debug!("ConstInferUnifier: r={:?}", r);
match r {
// Never make variables for regions bound within the type itself,
// nor for erased regions.
ty::ReLateBound(..) | ty::ReErased => {
return Ok(r);
}
ty::RePlaceholder(..)
| ty::ReVar(..)
| ty::ReEmpty(_)
| ty::ReStatic
| ty::ReEarlyBound(..)
| ty::ReFree(..) => {
// see common code below
}
}
let r_universe = self.infcx.universe_of_region(r);
if self.for_universe.can_name(r_universe) {
return Ok(r);
} else {
// FIXME: This is non-ideal because we don't give a
// very descriptive origin for this region variable.
Ok(self.infcx.next_region_var_in_universe(MiscVariable(self.span), self.for_universe))
}
}
fn consts(
&mut self,
c: &'tcx ty::Const<'tcx>,
_c: &'tcx ty::Const<'tcx>,
) -> RelateResult<'tcx, &'tcx ty::Const<'tcx>> {
debug_assert_eq!(c, _c);
debug!("ConstInferUnifier: c={:?}", c);
match c.val {
ty::ConstKind::Infer(InferConst::Var(vid)) => {
let mut inner = self.infcx.inner.borrow_mut();
let variable_table = &mut inner.const_unification_table();
// Check if the current unification would end up
// unifying `target_vid` with a const which contains
// an inference variable which is unioned with `target_vid`.
//
// Not doing so can easily result in stack overflows.
if variable_table.unioned(self.target_vid, vid) {
return Err(TypeError::CyclicConst(c));
}
let var_value = variable_table.probe_value(vid);
match var_value.val {
ConstVariableValue::Known { value: u } => self.consts(u, u),
ConstVariableValue::Unknown { universe } => {
if self.for_universe.can_name(universe) {
Ok(c)
} else {
let new_var_id = variable_table.new_key(ConstVarValue {
origin: var_value.origin,
val: ConstVariableValue::Unknown { universe: self.for_universe },
});
Ok(self.tcx().mk_const_var(new_var_id, c.ty))
}
}
}
}
ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted })
if self.tcx().lazy_normalization() =>
{
assert_eq!(promoted, None);
let substs = self.relate_with_variance(
ty::Variance::Invariant,
ty::VarianceDiagInfo::default(),
substs,
substs,
)?;
Ok(self.tcx().mk_const(ty::Const {
ty: c.ty,
val: ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted }),
}))
}
_ => relate::super_relate_consts(self, c, c),
}
}
}
| 37.881764 | 106 | 0.528302 |
8796a043f4ba2dc23145e6e46785795681ebdb05 | 6,541 | #![allow(dead_code)]
use std::sync::{Once, ONCE_INIT};
use std::{io, mem, raw};
use std::collections::HashMap;
use libc::{c_int,c_long,c_double,size_t};
use super::{consts,err,info,opt};
use super::err::ErrCode;
use http::body::Body;
use http::{header,Response};
use curl_ffi as ffi;
pub type ProgressCb<'a> = FnMut(uint, uint, uint, uint) + 'a;
pub struct Easy {
curl: *mut ffi::CURL
}
impl Easy {
pub fn new() -> Easy {
// Ensure that curl is globally initialized
global_init();
let handle = unsafe {
let p = ffi::curl_easy_init();
ffi::curl_easy_setopt(p, opt::NOPROGRESS, 0u);
p
};
Easy { curl: handle }
}
#[inline]
pub fn setopt<T: opt::OptVal>(&mut self, option: opt::Opt, val: T) -> Result<(), err::ErrCode> {
// TODO: Prevent setting callback related options
let mut res = err::ErrCode(err::OK);
unsafe {
val.with_c_repr(|repr| {
res = err::ErrCode(ffi::curl_easy_setopt(self.curl, option, repr));
})
}
if res.is_success() { Ok(()) } else { Err(res) }
}
pub fn perform(&mut self,
body: Option<&mut Body>,
progress: Option<Box<ProgressCb>>)
-> Result<Response, err::ErrCode> {
let mut builder = ResponseBuilder::new();
unsafe {
let resp_p: uint = mem::transmute(&builder);
let body_p: uint = match body {
Some(b) => mem::transmute(b),
None => 0
};
let progress_p: uint = match progress.as_ref() {
Some(cb) => mem::transmute(cb),
None => 0
};
// Set callback options
ffi::curl_easy_setopt(self.curl, opt::READFUNCTION, curl_read_fn);
ffi::curl_easy_setopt(self.curl, opt::READDATA, body_p);
ffi::curl_easy_setopt(self.curl, opt::WRITEFUNCTION, curl_write_fn);
ffi::curl_easy_setopt(self.curl, opt::WRITEDATA, resp_p);
ffi::curl_easy_setopt(self.curl, opt::HEADERFUNCTION, curl_header_fn);
ffi::curl_easy_setopt(self.curl, opt::HEADERDATA, resp_p);
ffi::curl_easy_setopt(self.curl, opt::PROGRESSFUNCTION, curl_progress_fn);
ffi::curl_easy_setopt(self.curl, opt::PROGRESSDATA, progress_p);
}
let err = err::ErrCode(unsafe { ffi::curl_easy_perform(self.curl) });
// If the request failed, abort here
if !err.is_success() {
return Err(err);
}
// Try to get the response code
builder.code = try!(self.get_response_code());
Ok(builder.build())
}
pub fn get_response_code(&self) -> Result<uint, err::ErrCode> {
Ok(try!(self.get_info_long(info::RESPONSE_CODE)) as uint)
}
pub fn get_total_time(&self) -> Result<uint, err::ErrCode> {
Ok(try!(self.get_info_long(info::TOTAL_TIME)) as uint)
}
fn get_info_long(&self, key: info::Key) -> Result<c_long, err::ErrCode> {
let v: c_long = 0;
let res = err::ErrCode(unsafe {
ffi::curl_easy_getinfo(self.curl as *const _, key, &v)
});
if !res.is_success() {
return Err(res);
}
Ok(v)
}
}
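// Illustrative usage sketch (assumes an `opt::URL` option constant exists
// in this crate's `opt` module and that `&str` implements `opt::OptVal`;
// error handling elided):
//
//     let mut handle = Easy::new();
//     handle.setopt(opt::URL, "http://example.com").unwrap();
//     let response = handle.perform(None, None).unwrap();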
#[inline]
fn global_init() {
// Schedule curl to be cleaned up after we're done with this whole process
static mut INIT: Once = ONCE_INIT;
unsafe {
INIT.call_once(|| ::std::rt::at_exit(|| ffi::curl_global_cleanup()))
}
}
impl Drop for Easy {
fn drop(&mut self) {
unsafe { ffi::curl_easy_cleanup(self.curl) }
}
}
/*
*
* TODO: Move this into handle
*
*/
struct ResponseBuilder {
code: uint,
hdrs: HashMap<String,Vec<String>>,
body: Vec<u8>
}
impl ResponseBuilder {
fn new() -> ResponseBuilder {
ResponseBuilder {
code: 0,
hdrs: HashMap::new(),
body: Vec::new()
}
}
fn add_header(&mut self, name: &str, val: &str) {
// TODO: Reduce allocations
use std::ascii::OwnedAsciiExt;
let name = name.to_string().into_ascii_lowercase();
let inserted = match self.hdrs.get_mut(&name) {
Some(vals) => {
vals.push(val.to_string());
true
}
None => false
};
if !inserted {
self.hdrs.insert(name, vec!(val.to_string()));
}
}
fn build(self) -> Response {
let ResponseBuilder { code, hdrs, body } = self;
Response::new(code, hdrs, body)
}
}
/*
*
* ===== Callbacks =====
*/
pub extern "C" fn curl_read_fn(p: *mut u8, size: size_t, nmemb: size_t, body: *mut Body) -> size_t {
if body.is_null() {
return 0;
}
let dst : &mut [u8] = unsafe { mem::transmute(raw::Slice { data: p, len: (size * nmemb) as uint } )};
let body: &mut Body = unsafe { mem::transmute(body) };
match body.read(dst.as_mut_slice()) {
Ok(len) => len as size_t,
Err(e) => {
match e.kind {
io::EndOfFile => 0 as size_t,
_ => consts::CURL_READFUNC_ABORT as size_t
}
}
}
}
pub extern "C" fn curl_write_fn(p: *mut u8, size: size_t, nmemb: size_t, resp: *mut ResponseBuilder) -> size_t {
if !resp.is_null() {
let builder: &mut ResponseBuilder = unsafe { mem::transmute(resp) };
let chunk : &[u8] = unsafe { mem::transmute(raw::Slice { data: p, len: (size * nmemb) as uint } )};
builder.body.push_all(chunk.as_slice());
}
size * nmemb
}
pub extern "C" fn curl_header_fn(p: *mut u8, size: size_t, nmemb: size_t, resp: &mut ResponseBuilder) -> size_t {
// TODO: Skip the first call (it seems to be the status line)
let vec : &[u8] = unsafe { mem::transmute(raw::Slice { data: p, len: (size * nmemb) as uint } )};
match header::parse(vec.as_slice()) {
Some((name, val)) => {
resp.add_header(name, val);
}
None => {}
}
vec.len() as size_t
}
pub extern "C" fn curl_progress_fn(cb: *mut Box<ProgressCb>, dltotal: c_double, dlnow: c_double, ultotal: c_double, ulnow: c_double) -> c_int {
#[inline]
fn to_uint(v: c_double) -> uint {
if v > 0.0 { v as uint } else { 0 }
}
if !cb.is_null() {
let cb: &mut ProgressCb = unsafe { &mut **cb };
(*cb)(to_uint(dltotal), to_uint(dlnow), to_uint(ultotal), to_uint(ulnow));
}
0
}
| 27.599156 | 143 | 0.551445 |
714d9941a66e056ff2cfea34aa758964a6e12b81 | 26,541 | use crate::clauses::builder::ClauseBuilder;
use crate::split::Split;
use chalk_ir::cast::{Cast, Caster};
use chalk_ir::interner::Interner;
use chalk_ir::*;
use chalk_rust_ir::*;
use std::iter;
/// Trait for lowering a given piece of rust-ir source (e.g., an impl
/// or struct definition) into its associated "program clauses" --
/// that is, into the lowered, logical rules that it defines.
pub trait ToProgramClauses<I: Interner> {
fn to_program_clauses(&self, builder: &mut ClauseBuilder<'_, I>);
}
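// Illustrative driver sketch (hypothetical call site; `db` is some
// `RustIrDatabase` and `clauses` is the output vector the builder fills):
//
//     let mut builder = ClauseBuilder::new(db, &mut clauses);
//     impl_datum.to_program_clauses(&mut builder);
//     // `clauses` now contains, e.g.:
//     //     Implemented(Vec<T>: Clone) :- Implemented(T: Clone).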
impl<I: Interner> ToProgramClauses<I> for ImplDatum<I> {
/// Given `impl<T: Clone> Clone for Vec<T> { ... }`, generate:
///
/// ```notrust
/// -- Rule Implemented-From-Impl
/// forall<T> {
/// Implemented(Vec<T>: Clone) :- Implemented(T: Clone).
/// }
/// ```
///
/// For a negative impl like `impl... !Clone for ...`, however, we
/// generate nothing -- this is just a way to *opt out* from the
/// default auto trait impls, it doesn't have any positive effect
/// on its own.
fn to_program_clauses(&self, builder: &mut ClauseBuilder<'_, I>) {
if self.is_positive() {
let binders = self.binders.map_ref(|b| (&b.trait_ref, &b.where_clauses));
builder.push_binders(&binders, |builder, (trait_ref, where_clauses)| {
builder.push_clause(trait_ref, where_clauses);
});
}
}
}
impl<I: Interner> ToProgramClauses<I> for AssociatedTyValue<I> {
/// Given the following trait:
///
/// ```notrust
/// trait Iterable {
/// type IntoIter<'a>: 'a;
/// }
/// ```
///
/// Then for the following impl:
/// ```notrust
/// impl<T> Iterable for Vec<T> where T: Clone {
/// type IntoIter<'a> = Iter<'a, T>;
/// }
/// ```
///
/// we generate:
///
/// ```notrust
/// -- Rule Normalize-From-Impl
/// forall<'a, T> {
/// Normalize(<Vec<T> as Iterable>::IntoIter<'a> -> Iter<'a, T>>) :-
/// Implemented(T: Clone), // (1)
/// Implemented(Iter<'a, T>: 'a). // (2)
/// }
/// ```
fn to_program_clauses(&self, builder: &mut ClauseBuilder<'_, I>) {
let impl_datum = builder.db.impl_datum(self.impl_id);
let associated_ty = builder.db.associated_ty_data(self.associated_ty_id);
builder.push_binders(&self.value, |builder, assoc_ty_value| {
let all_parameters = builder.placeholders_in_scope().to_vec();
// Get the projection for this associated type:
//
// * `impl_params`: `[!T]`
// * `projection`: `<Vec<!T> as Iterable>::Iter<'!a>`
let (impl_params, projection) = builder
.db
.impl_parameters_and_projection_from_associated_ty_value(&all_parameters, self);
// Assemble the full list of conditions for projection to be valid.
// This comes in two parts, marked as (1) and (2) in doc above:
//
// 1. require that the where clauses from the impl apply
let interner = builder.db.interner();
let impl_where_clauses = impl_datum
.binders
.map_ref(|b| &b.where_clauses)
.into_iter()
.map(|wc| wc.substitute(interner, impl_params));
// 2. any where-clauses from the `type` declaration in the trait: the
// parameters must be substituted with those of the impl
let assoc_ty_where_clauses = associated_ty
.binders
.map_ref(|b| &b.where_clauses)
.into_iter()
.map(|wc| wc.substitute(interner, &projection.substitution));
// Create the final program clause:
//
// ```notrust
// -- Rule Normalize-From-Impl
// forall<'a, T> {
// Normalize(<Vec<T> as Iterable>::IntoIter<'a> -> Iter<'a, T>>) :-
// Implemented(T: Clone), // (1)
// Implemented(Iter<'a, T>: 'a). // (2)
// }
// ```
builder.push_clause(
Normalize {
alias: projection.clone(),
ty: assoc_ty_value.ty,
},
impl_where_clauses.chain(assoc_ty_where_clauses),
);
});
}
}
impl<I: Interner> ToProgramClauses<I> for StructDatum<I> {
/// Given the following type definition: `struct Foo<T: Eq> { }`, generate:
///
/// ```notrust
/// -- Rule WellFormed-Type
/// forall<T> {
/// WF(Foo<T>) :- WF(T: Eq).
/// }
///
/// -- Rule Implied-Bound-From-Type
/// forall<T> {
/// FromEnv(T: Eq) :- FromEnv(Foo<T>).
/// }
///
/// forall<T> {
/// IsFullyVisible(Foo<T>) :- IsFullyVisible(T).
/// }
/// ```
///
/// If the type `Foo` is marked `#[upstream]`, we also generate:
///
/// ```notrust
/// forall<T> { IsUpstream(Foo<T>). }
/// ```
///
/// Otherwise, if the type `Foo` is not marked `#[upstream]`, we generate:
/// ```notrust
/// forall<T> { IsLocal(Foo<T>). }
/// ```
///
/// Given an `#[upstream]` type that is also fundamental:
///
/// ```notrust
/// #[upstream]
/// #[fundamental]
/// struct Box<T> {}
/// ```
///
/// We generate the following clauses:
///
/// ```notrust
/// forall<T> { IsLocal(Box<T>) :- IsLocal(T). }
///
/// forall<T> { IsUpstream(Box<T>) :- IsUpstream(T). }
///
/// // Generated for both upstream and local fundamental types
/// forall<T> { DownstreamType(Box<T>) :- DownstreamType(T). }
/// ```
///
fn to_program_clauses(&self, builder: &mut ClauseBuilder<'_, I>) {
debug_heading!("StructDatum::to_program_clauses(self={:?})", self);
let interner = builder.interner();
let binders = self.binders.map_ref(|b| &b.where_clauses);
builder.push_binders(&binders, |builder, where_clauses| {
let self_appl_ty = &ApplicationTy {
name: self.id.cast(interner),
substitution: builder.substitution_in_scope(),
};
let self_ty = self_appl_ty.clone().intern(interner);
// forall<T> {
// WF(Foo<T>) :- WF(T: Eq).
// }
builder.push_clause(
WellFormed::Ty(self_ty.clone()),
where_clauses
.iter()
.cloned()
.map(|qwc| qwc.into_well_formed_goal(interner)),
);
// forall<T> {
// IsFullyVisible(Foo<T>) :- IsFullyVisible(T).
// }
builder.push_clause(
DomainGoal::IsFullyVisible(self_ty.clone()),
self_appl_ty
.type_parameters(interner)
.map(|ty| DomainGoal::IsFullyVisible(ty).cast::<Goal<_>>(interner)),
);
// Fundamental types often have rules in the form of:
// Goal(FundamentalType<T>) :- Goal(T)
// This macro makes creating that kind of clause easy
macro_rules! fundamental_rule {
($goal:ident) => {
// Fundamental types must always have at least one
// type parameter for this rule to make any
                // sense. We currently do not have any
// fundamental types with more than one type
// parameter, nor do we know what the behaviour
// for that should be. Thus, we are asserting here
// that there is only a single type parameter
// until the day when someone makes a decision
// about how that should behave.
assert_eq!(
self_appl_ty.len_type_parameters(interner),
1,
"Only fundamental types with a single parameter are supported"
);
builder.push_clause(
DomainGoal::$goal(self_ty.clone()),
Some(DomainGoal::$goal(
// This unwrap is safe because we asserted
// above for the presence of a type
// parameter
self_appl_ty.first_type_parameter(interner).unwrap(),
)),
);
};
}
// Types that are not marked `#[upstream]` satisfy IsLocal(TypeName)
if !self.flags.upstream {
// `IsLocalTy(Ty)` depends *only* on whether the type
// is marked #[upstream] and nothing else
builder.push_fact(DomainGoal::IsLocal(self_ty.clone()));
} else if self.flags.fundamental {
// If a type is `#[upstream]`, but is also
// `#[fundamental]`, it satisfies IsLocal if and only
// if its parameters satisfy IsLocal
fundamental_rule!(IsLocal);
fundamental_rule!(IsUpstream);
} else {
// The type is just upstream and not fundamental
builder.push_fact(DomainGoal::IsUpstream(self_ty.clone()));
}
if self.flags.fundamental {
fundamental_rule!(DownstreamType);
}
for qwc in where_clauses {
// Generate implied bounds rules. We have to push the binders from the where-clauses
// too -- e.g., if we had `struct Foo<T: for<'a> Bar<&'a i32>>`, we would
// create a reverse rule like:
//
// ```notrust
// forall<T, 'a> { FromEnv(T: Bar<&'a i32>) :- FromEnv(Foo<T>) }
// ```
//
// In other words, you can assume `T: Bar<&'a i32>`
// for any `'a` *if* you are assuming that `Foo<T>` is
// well formed.
builder.push_binders(&qwc, |builder, wc| {
builder.push_clause(
wc.into_from_env_goal(interner),
Some(self_ty.clone().from_env()),
);
});
}
});
}
}
impl<I: Interner> ToProgramClauses<I> for TraitDatum<I> {
/// Given the following trait declaration: `trait Ord<T> where Self: Eq<T> { ... }`, generate:
///
/// ```notrust
/// -- Rule WellFormed-TraitRef
/// forall<Self, T> {
/// WF(Self: Ord<T>) :- Implemented(Self: Ord<T>), WF(Self: Eq<T>).
/// }
/// ```
///
/// and the reverse rules:
///
/// ```notrust
/// -- Rule Implemented-From-Env
/// forall<Self, T> {
    ///     Implemented(Self: Ord<T>) :- FromEnv(Self: Ord<T>).
/// }
///
/// -- Rule Implied-Bound-From-Trait
/// forall<Self, T> {
/// FromEnv(Self: Eq<T>) :- FromEnv(Self: Ord<T>).
/// }
/// ```
///
/// As specified in the orphan rules, if a trait is not marked `#[upstream]`, the current crate
/// can implement it for any type. To represent that, we generate:
///
/// ```notrust
/// // `Ord<T>` would not be `#[upstream]` when compiling `std`
/// forall<Self, T> { LocalImplAllowed(Self: Ord<T>). }
/// ```
///
/// For traits that are `#[upstream]` (i.e. not in the current crate), the orphan rules dictate
/// that impls are allowed as long as at least one type parameter is local and each type
/// prior to that is fully visible. That means that each type prior to the first local
/// type cannot contain any of the type parameters of the impl.
///
/// This rule is fairly complex, so we expand it and generate a program clause for each
/// possible case. This is represented as follows:
///
/// ```notrust
/// // for `#[upstream] trait Foo<T, U, V> where Self: Eq<T> { ... }`
/// forall<Self, T, U, V> {
/// LocalImplAllowed(Self: Foo<T, U, V>) :- IsLocal(Self).
/// }
///
/// forall<Self, T, U, V> {
/// LocalImplAllowed(Self: Foo<T, U, V>) :-
/// IsFullyVisible(Self),
/// IsLocal(T).
/// }
///
/// forall<Self, T, U, V> {
/// LocalImplAllowed(Self: Foo<T, U, V>) :-
/// IsFullyVisible(Self),
/// IsFullyVisible(T),
/// IsLocal(U).
/// }
///
/// forall<Self, T, U, V> {
/// LocalImplAllowed(Self: Foo<T, U, V>) :-
/// IsFullyVisible(Self),
/// IsFullyVisible(T),
/// IsFullyVisible(U),
/// IsLocal(V).
/// }
/// ```
///
/// The overlap check uses compatible { ... } mode to ensure that it accounts for impls that
/// may exist in some other *compatible* world. For every upstream trait, we add a rule to
/// account for the fact that upstream crates are able to compatibly add impls of upstream
/// traits for upstream types.
///
/// ```notrust
/// // For `#[upstream] trait Foo<T, U, V> where Self: Eq<T> { ... }`
/// forall<Self, T, U, V> {
/// Implemented(Self: Foo<T, U, V>) :-
/// Implemented(Self: Eq<T>), // where clauses
/// Compatible, // compatible modality
/// IsUpstream(Self),
/// IsUpstream(T),
/// IsUpstream(U),
/// IsUpstream(V),
/// CannotProve. // returns ambiguous
/// }
/// ```
///
/// In certain situations, this is too restrictive. Consider the following code:
///
/// ```notrust
/// /* In crate std */
/// trait Sized { }
/// struct str { }
///
/// /* In crate bar (depends on std) */
/// trait Bar { }
/// impl Bar for str { }
/// impl<T> Bar for T where T: Sized { }
/// ```
///
/// Here, because of the rules we've defined, these two impls overlap. The std crate is
/// upstream to bar, and thus it is allowed to compatibly implement Sized for str. If str
/// can implement Sized in a compatible future, these two impls definitely overlap since the
/// second impl covers all types that implement Sized.
///
/// The solution we've got right now is to mark Sized as "fundamental" when it is defined.
/// This signals to the Rust compiler that it can rely on the fact that str does not
/// implement Sized in all contexts. A consequence of this is that we can no longer add an
/// implementation of Sized compatibly for str. This is the trade off you make when defining
/// a fundamental trait.
///
/// To implement fundamental traits, we simply just do not add the rule above that allows
/// upstream types to implement upstream traits. Fundamental traits are not allowed to
/// compatibly do that.
fn to_program_clauses(&self, builder: &mut ClauseBuilder<'_, I>) {
let interner = builder.interner();
let binders = self.binders.map_ref(|b| &b.where_clauses);
builder.push_binders(&binders, |builder, where_clauses| {
let trait_ref = chalk_ir::TraitRef {
trait_id: self.id,
substitution: builder.substitution_in_scope(),
};
builder.push_clause(
trait_ref.clone().well_formed(),
where_clauses
.iter()
.cloned()
.map(|qwc| qwc.into_well_formed_goal(interner))
.casted::<Goal<_>>(interner)
.chain(Some(trait_ref.clone().cast(interner))),
);
// The number of parameters will always be at least 1
// because of the Self parameter that is automatically
// added to every trait. This is important because
// otherwise the added program clauses would not have any
// conditions.
let type_parameters: Vec<_> = trait_ref.type_parameters(interner).collect();
// Add all cases for potential downstream impls that could exist
for i in 0..type_parameters.len() {
builder.push_clause(
trait_ref.clone(),
where_clauses
.iter()
.cloned()
.casted(interner)
.chain(iter::once(DomainGoal::Compatible(()).cast(interner)))
.chain((0..i).map(|j| {
DomainGoal::IsFullyVisible(type_parameters[j].clone()).cast(interner)
}))
.chain(iter::once(
DomainGoal::DownstreamType(type_parameters[i].clone()).cast(interner),
))
.chain(iter::once(GoalData::CannotProve(()).intern(interner))),
);
}
// Orphan rules:
if !self.flags.upstream {
// Impls for traits declared locally always pass the impl rules
builder.push_fact(DomainGoal::LocalImplAllowed(trait_ref.clone()));
} else {
// Impls for remote traits must have a local type in the right place
for i in 0..type_parameters.len() {
builder.push_clause(
DomainGoal::LocalImplAllowed(trait_ref.clone()),
(0..i)
.map(|j| DomainGoal::IsFullyVisible(type_parameters[j].clone()))
.chain(Some(DomainGoal::IsLocal(type_parameters[i].clone()))),
);
}
}
// Fundamental traits can be reasoned about negatively without any ambiguity, so no
// need for this rule if the trait is fundamental.
if !self.flags.fundamental {
builder.push_clause(
trait_ref.clone(),
where_clauses
.iter()
.cloned()
.casted(interner)
.chain(iter::once(DomainGoal::Compatible(()).cast(interner)))
.chain(
trait_ref
.type_parameters(interner)
.map(|ty| DomainGoal::IsUpstream(ty).cast(interner)),
)
.chain(iter::once(GoalData::CannotProve(()).intern(interner))),
);
}
// Reverse implied bound rules: given (e.g.) `trait Foo: Bar + Baz`,
// we create rules like:
//
// ```
// FromEnv(T: Bar) :- FromEnv(T: Foo)
// ```
//
// and
//
// ```
// FromEnv(T: Baz) :- FromEnv(T: Foo)
// ```
for qwc in &where_clauses {
builder.push_binders(qwc, |builder, wc| {
builder.push_clause(
wc.into_from_env_goal(interner),
Some(trait_ref.clone().from_env()),
);
});
}
// Finally, for every trait `Foo` we make a rule
//
// ```
// Implemented(T: Foo) :- FromEnv(T: Foo)
// ```
builder.push_clause(trait_ref.clone(), Some(trait_ref.clone().from_env()));
});
}
}
impl<I: Interner> ToProgramClauses<I> for AssociatedTyDatum<I> {
/// For each associated type, we define the "projection
/// equality" rules. There are always two; one for a successful normalization,
/// and one for the "fallback" notion of equality.
///
/// Given: (here, `'a` and `T` represent zero or more parameters)
///
/// ```notrust
/// trait Foo {
/// type Assoc<'a, T>: Bounds where WC;
/// }
/// ```
///
/// we generate the 'fallback' rule:
///
/// ```notrust
/// -- Rule AliasEq-Placeholder
/// forall<Self, 'a, T> {
/// AliasEq(<Self as Foo>::Assoc<'a, T> = (Foo::Assoc<'a, T>)<Self>).
/// }
/// ```
///
/// and
///
/// ```notrust
/// -- Rule AliasEq-Normalize
/// forall<Self, 'a, T, U> {
/// AliasEq(<T as Foo>::Assoc<'a, T> = U) :-
/// Normalize(<T as Foo>::Assoc -> U).
/// }
/// ```
///
/// We used to generate an "elaboration" rule like this:
///
/// ```notrust
/// forall<T> {
/// T: Foo :- exists<U> { AliasEq(<T as Foo>::Assoc = U) }.
/// }
/// ```
///
/// but this caused problems with the recursive solver. In
/// particular, whenever normalization is possible, we cannot
/// solve that projection uniquely, since we can now elaborate
/// `AliasEq` to fallback *or* normalize it. So instead we
/// handle this kind of reasoning through the `FromEnv` predicate.
///
/// We also generate rules specific to WF requirements and implied bounds:
///
/// ```notrust
/// -- Rule WellFormed-AssocTy
/// forall<Self, 'a, T> {
/// WellFormed((Foo::Assoc)<Self, 'a, T>) :- WellFormed(Self: Foo), WellFormed(WC).
/// }
///
/// -- Rule Implied-WC-From-AssocTy
/// forall<Self, 'a, T> {
/// FromEnv(WC) :- FromEnv((Foo::Assoc)<Self, 'a, T>).
/// }
///
/// -- Rule Implied-Bound-From-AssocTy
/// forall<Self, 'a, T> {
/// FromEnv(<Self as Foo>::Assoc<'a,T>: Bounds) :- FromEnv(Self: Foo), WC.
/// }
///
/// -- Rule Implied-Trait-From-AssocTy
/// forall<Self,'a, T> {
/// FromEnv(Self: Foo) :- FromEnv((Foo::Assoc)<Self, 'a,T>).
/// }
/// ```
fn to_program_clauses(&self, builder: &mut ClauseBuilder<'_, I>) {
let interner = builder.interner();
let binders = self.binders.map_ref(|b| (&b.where_clauses, &b.bounds));
builder.push_binders(&binders, |builder, (where_clauses, bounds)| {
let substitution = builder.substitution_in_scope();
let alias = AliasTy {
associated_ty_id: self.id,
substitution: substitution.clone(),
};
let projection_ty = alias.clone().intern(interner);
// Retrieve the trait ref embedding the associated type
let trait_ref = builder.db.trait_ref_from_projection(&alias);
// Construct an application from the projection. So if we have `<T as Iterator>::Item`,
// we would produce `(Iterator::Item)<T>`.
let app_ty: Ty<_> = ApplicationTy {
name: TypeName::AssociatedType(self.id),
substitution,
}
.intern(interner);
let alias_eq = AliasEq {
alias: alias.clone(),
ty: app_ty.clone(),
};
// Fallback rule. The solver uses this to move between the projection
// and placeholder type.
//
// forall<Self> {
// AliasEq(<Self as Foo>::Assoc = (Foo::Assoc)<Self>).
// }
builder.push_fact(alias_eq);
// Well-formedness of projection type.
//
// forall<Self> {
// WellFormed((Foo::Assoc)<Self>) :- WellFormed(Self: Foo), WellFormed(WC).
// }
builder.push_clause(
WellFormed::Ty(app_ty.clone()),
iter::once(WellFormed::Trait(trait_ref.clone()).cast::<Goal<_>>(interner)).chain(
where_clauses
.iter()
.cloned()
.map(|qwc| qwc.into_well_formed_goal(interner))
.casted(interner),
),
);
// Assuming well-formedness of projection type means we can assume
// the trait ref as well. Mostly used in function bodies.
//
// forall<Self> {
// FromEnv(Self: Foo) :- FromEnv((Foo::Assoc)<Self>).
// }
builder.push_clause(FromEnv::Trait(trait_ref.clone()), Some(app_ty.from_env()));
// Reverse rule for where clauses.
//
// forall<Self> {
// FromEnv(WC) :- FromEnv((Foo::Assoc)<Self>).
// }
//
// This is really a family of clauses, one for each where clause.
for qwc in &where_clauses {
builder.push_binders(qwc, |builder, wc| {
builder.push_clause(
wc.into_from_env_goal(interner),
Some(FromEnv::Ty(app_ty.clone())),
);
});
}
// Reverse rule for implied bounds.
//
// forall<Self> {
// FromEnv(<Self as Foo>::Assoc: Bounds) :- FromEnv(Self: Foo), WC
// }
for quantified_bound in &bounds {
builder.push_binders(quantified_bound, |builder, bound| {
for wc in bound.into_where_clauses(interner, projection_ty.clone()) {
builder.push_clause(
wc.into_from_env_goal(interner),
iter::once(FromEnv::Trait(trait_ref.clone()).cast::<Goal<_>>(interner))
.chain(where_clauses.iter().cloned().casted(interner)),
);
}
});
}
// add new type parameter U
builder.push_bound_ty(|builder, ty| {
// `Normalize(<T as Foo>::Assoc -> U)`
let normalize = Normalize {
alias: alias.clone(),
ty: ty.clone(),
};
// `AliasEq(<T as Foo>::Assoc = U)`
let alias_eq = AliasEq { alias, ty };
// Projection equality rule from above.
//
// forall<T, U> {
// AliasEq(<T as Foo>::Assoc = U) :-
// Normalize(<T as Foo>::Assoc -> U).
// }
builder.push_clause(alias_eq, Some(normalize));
});
});
}
}
| 38.633188 | 100 | 0.49399 |
619da440099171f21923d94cfda9132ae4a6711f | 819 | /// This enumeration exposes the errors that can occur during the RTP packet
/// marshalling or unmarshalling process.
#[derive(Debug, Fail)]
pub enum RtpPacketError {
/// Emitted when the RTP version used to encode the packet is not supported.
#[fail(display = "Invalid RTP version used: {}", version)]
InvalidRtpVersion { version: u8 },
    /// Emitted when the RTP header extension's payload is not made of 4-byte words.
#[fail(display = "Invalid RTP header extension provided: {}", length)]
InvalidRtpHeaderExtension { length: usize },
    /// Emitted when the marshalled RTP packet is not valid: either it is too
    /// small to contain a header, or too small to contain a payload.
#[fail(display = "Provided marshalled RTP packet is not valid")]
InvalidRtpPacket,
}
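// Illustrative handling sketch (the `RtpPacket::unmarshal` constructor
// named here is hypothetical):
//
//     match RtpPacket::unmarshal(&buffer) {
//         Ok(packet) => { /* process the packet */ }
//         Err(RtpPacketError::InvalidRtpVersion { version }) => {
//             eprintln!("unsupported RTP version: {}", version)
//         }
//         Err(e) => eprintln!("failed to unmarshal packet: {}", e),
//     }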
| 45.5 | 85 | 0.709402 |
cceb2b2d5b6b58af81de8a19dba67b5c1e5b209a | 15,833 | // Copyright (c) 2019, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use hhbc_string_utils_rust::mangle_xhp_id;
use ocamlrep::rc::RcOc;
use oxidized::{file_info::Mode, relative_path::RelativePath};
use parser_rust::{
parser::Parser, parser_env::ParserEnv, smart_constructors_wrappers::WithKind,
source_text::SourceText,
};
use syntax_tree::mode_parser::parse_mode;
use crate::facts::*;
use crate::facts_smart_constructors::*;
pub type FactsParser<'a> = Parser<'a, WithKind<FactsSmartConstructors<'a>>, HasScriptContent<'a>>;
pub struct ExtractAsJsonOpts {
pub php5_compat_mode: bool,
pub hhvm_compat_mode: bool,
pub allow_new_attribute_syntax: bool,
pub filename: RelativePath,
}
pub fn extract_as_json(text: &[u8], opts: ExtractAsJsonOpts) -> Option<String> {
from_text(text, opts).map(|facts| facts.to_json(text))
}
pub fn from_text(text: &[u8], opts: ExtractAsJsonOpts) -> Option<Facts> {
let ExtractAsJsonOpts {
php5_compat_mode,
hhvm_compat_mode,
allow_new_attribute_syntax,
filename,
} = opts;
let text = SourceText::make(RcOc::new(filename), text);
let is_experimental_mode = match parse_mode(&text) {
Some(Mode::Mexperimental) => true,
_ => false,
};
let env = ParserEnv {
php5_compat_mode,
hhvm_compat_mode,
is_experimental_mode,
allow_new_attribute_syntax,
..ParserEnv::default()
};
let mut parser = FactsParser::make(&text, env);
let root = parser.parse_script(None);
    // Report errors only if the result of parsing is non-empty.
if parser.sc_state().0 && !parser.errors().is_empty() {
None
} else {
Some(collect(("".to_owned(), Facts::default()), root).1)
}
}
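// Illustrative call sketch (path and flag values are made up; assumes
// `Prefix::Dummy` and `RelativePath::make` from `oxidized::relative_path`):
//
//     use oxidized::relative_path::{Prefix, RelativePath};
//     let opts = ExtractAsJsonOpts {
//         php5_compat_mode: false,
//         hhvm_compat_mode: false,
//         allow_new_attribute_syntax: false,
//         filename: RelativePath::make(Prefix::Dummy, "a.php".into()),
//     };
//     if let Some(json) = extract_as_json(b"<?hh class C {}", opts) {
//         println!("{}", json);
//     }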
// implementation details
use std::string::String;
use Node::*; // Ensure String doesn't refer to Node::String
fn qualified_name(namespace: &str, name: Node) -> Option<String> {
fn qualified_name_from_parts(namespace: &str, parts: Vec<Node>) -> Option<String> {
let mut qualified_name = String::new();
let mut leading_backslash = false;
for (index, part) in parts.into_iter().enumerate() {
match part {
Name(name) => {
qualified_name.push_str(&String::from_utf8_lossy(name.get().as_slice()))
}
Backslash if index == 0 => leading_backslash = true,
ListItem(listitem) => {
if let (Name(name), Backslash) = *listitem {
qualified_name.push_str(&String::from_utf8_lossy(name.get().as_slice()));
qualified_name.push_str("\\");
}
}
_ => return None,
}
}
Some(if leading_backslash || namespace.is_empty() {
qualified_name // globally qualified name
} else {
namespace.to_owned() + "\\" + &qualified_name
})
}
match name {
Name(name) => {
// always a simple name
let name = name.to_string();
Some(if namespace.is_empty() {
name
} else {
namespace.to_owned() + "\\" + &name
})
}
XhpName(name) => {
// xhp names are always unqualified
let name = name.to_string();
Some(mangle_xhp_id(name))
}
Node::QualifiedName(parts) => qualified_name_from_parts(namespace, parts),
_ => None,
}
}
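// For example (illustrative): with namespace "NS", `Name("Foo")` resolves to
// "NS\Foo", while a qualified name with a leading backslash (`\Foo\Bar`)
// stays global and resolves to "Foo\Bar".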
fn modifiers_to_flags(modifiers: &Node) -> Flags {
let mut flags = Flag::default();
if let List(modifiers) = modifiers {
for modifier in modifiers {
flags = match modifier {
Node::Abstract => Flag::Abstract.set(flags),
Node::Final => Flag::Final.set(flags),
Node::Static => Flag::Final.set(Flag::Abstract.set(flags)),
_ => flags,
};
}
}
flags
}
fn typenames_from_list(list: Node, namespace: &str, names: &mut StringSet) {
match list {
Node::List(nodes) => nodes.into_iter().for_each(|name| {
if let Some(name) = qualified_name(namespace, name) {
names.insert(name);
}
}),
_ => (),
};
}
fn define_name(name: &[u8]) -> String {
let name = &String::from_utf8_lossy(name);
name[1..name.len() - 2].to_owned() // strip quotes
}
fn defines_from_method_body(constants: Vec<String>, body: Node) -> Vec<String> {
fn aux(mut acc: Vec<String>, list: Node) -> Vec<String> {
match list {
Node::List(nodes) => nodes.into_iter().fold(acc, aux),
Node::Define(define) => {
if let Node::Name(name) = *define {
acc.push(define_name(&name.get()));
}
acc
}
_ => acc,
}
}
aux(constants, body)
}
fn type_info_from_class_body(
namespace: &str,
check_require: bool,
attributes: Node,
body: Node,
facts: &mut Facts,
type_facts: &mut TypeFacts,
) {
let aux = |mut constants: Vec<String>, node| {
if let RequireExtendsClause(name) = node {
if check_require {
if let Some(name) = qualified_name(namespace, *name) {
type_facts.require_extends.insert(name);
}
}
} else if let RequireImplementsClause(name) = node {
if check_require {
if let Some(name) = qualified_name(namespace, *name) {
type_facts.require_implements.insert(name);
}
}
} else if let TraitUseClause(uses) = node {
typenames_from_list(*uses, namespace, &mut type_facts.base_types);
} else if let MethodDecl(body) = node {
if namespace.is_empty() {
// in methods we collect only defines
constants = defines_from_method_body(constants, *body);
}
}
constants
};
if let List(nodes) = body {
let facts_constants = std::mem::replace(&mut facts.constants, vec![]);
facts.constants = nodes.into_iter().fold(facts_constants, aux);
}
type_facts.attributes = attributes_into_facts(namespace, attributes);
}
fn attributes_into_facts(namespace: &str, attributes: Node) -> Attributes {
match attributes {
Node::List(nodes) => nodes
.into_iter()
.fold(Attributes::new(), |mut attributes, node| match node {
Node::ListItem(item) => {
let attribute_values_aux = |attribute_node| match attribute_node {
Node::Name(name) => {
let mut attribute_values = Vec::new();
attribute_values.push(name.to_string());
attribute_values
}
Node::String(name) => {
let mut attribute_values = Vec::new();
attribute_values.push(name.to_unescaped_string());
attribute_values
}
Node::List(nodes) => {
nodes
.into_iter()
.fold(Vec::new(), |mut attribute_values, node| match node {
Node::Name(name) => {
attribute_values.push(name.to_string());
attribute_values
}
Node::String(name) => {
// TODO(T47593892) fold constant
attribute_values.push(name.to_unescaped_string());
attribute_values
}
Node::ScopeResolutionExpression(expr) => {
if let (Node::Name(name), Node::Class) = *expr {
attribute_values.push(if namespace.is_empty() {
name.to_string()
} else {
namespace.to_owned() + "\\" + &name.to_string()
});
}
attribute_values
}
_ => attribute_values,
})
}
_ => Vec::new(),
};
match &(item.0) {
Node::Name(name) => {
attributes.insert(name.to_string(), attribute_values_aux(item.1));
attributes
}
Node::String(name) => {
attributes
.insert(name.to_unescaped_string(), attribute_values_aux(item.1));
attributes
}
_ => attributes,
}
}
_ => attributes,
}),
_ => Attributes::new(),
}
}
fn class_decl_into_facts(decl: ClassDeclChildren, namespace: &str, mut facts: &mut Facts) {
if let Some(name) = qualified_name(namespace, decl.name) {
let (kind, flags) = match decl.kind {
Node::Class => (TypeKind::Class, modifiers_to_flags(&decl.modifiers)),
Node::Interface => (TypeKind::Interface, Flag::Abstract.as_flags()),
Node::Trait => (TypeKind::Trait, Flag::Abstract.as_flags()),
_ => (TypeKind::Unknown, Flag::default()),
};
let check_require = match kind {
TypeKind::Interface | TypeKind::Trait => true,
_ => false,
};
let mut decl_facts = TypeFacts {
kind,
flags,
attributes: Attributes::new(),
base_types: StringSet::new(),
require_extends: StringSet::new(),
require_implements: StringSet::new(),
};
type_info_from_class_body(
namespace,
check_require,
decl.attributes,
decl.body,
&mut facts,
&mut decl_facts,
);
// trait uses are already added to base_types, so just add extends & implements
typenames_from_list(decl.extends, namespace, &mut decl_facts.base_types);
typenames_from_list(decl.implements, namespace, &mut decl_facts.base_types);
add_or_update_classish_decl(name, decl_facts, &mut facts.types);
}
}
fn add_or_update_classish_decl(name: String, mut delta: TypeFacts, types: &mut TypeFactsByName) {
types
.entry(name)
.and_modify(|tf| {
if tf.kind != delta.kind {
tf.kind = TypeKind::Mixed;
}
tf.flags = Flag::MultipleDeclarations.set(tf.flags);
tf.flags = Flag::combine(tf.flags, delta.flags);
tf.base_types.append(&mut delta.base_types);
tf.attributes.append(&mut delta.attributes);
tf.require_extends.append(&mut delta.require_extends);
tf.require_implements.append(&mut delta.require_implements);
})
.or_insert_with(|| {
if let TypeKind::Enum = delta.kind {
delta.base_types.insert("HH\\BuiltinEnum".into());
}
delta
});
}
type CollectAcc = (String, Facts);
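// Accumulator threaded through the traversal: (current namespace, facts gathered so far).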
fn collect(mut acc: CollectAcc, node: Node) -> CollectAcc {
match node {
List(nodes) => acc = nodes.into_iter().fold(acc, collect),
ClassDecl(decl) => {
class_decl_into_facts(*decl, &acc.0, &mut acc.1);
}
EnumDecl(decl) => {
if let Some(name) = qualified_name(&acc.0, decl.name) {
let attributes = attributes_into_facts(&acc.0, decl.attributes);
let enum_facts = TypeFacts {
flags: Flag::Final as isize,
kind: TypeKind::Enum,
attributes,
base_types: StringSet::new(),
require_extends: StringSet::new(),
require_implements: StringSet::new(),
};
add_or_update_classish_decl(name, enum_facts, &mut acc.1.types);
}
}
FunctionDecl(name) => {
if let Some(name) = qualified_name(&acc.0, *name) {
acc.1.functions.push(name);
}
}
ConstDecl(name) => {
if let Some(name) = qualified_name(&acc.0, *name) {
acc.1.constants.push(name);
}
}
TypeAliasDecl(decl) => {
if let Some(name) = qualified_name(&acc.0, decl.name) {
let attributes = attributes_into_facts(&acc.0, decl.attributes);
let type_alias_facts = TypeFacts {
flags: Flag::default() as isize,
kind: TypeKind::TypeAlias,
attributes,
base_types: StringSet::new(),
require_extends: StringSet::new(),
require_implements: StringSet::new(),
};
add_or_update_classish_decl(name.clone(), type_alias_facts, &mut acc.1.types);
acc.1.type_aliases.push(name);
}
}
Define(define) => {
if acc.0.is_empty() {
if let Node::String(ref name) = *define {
acc.1.constants.push(define_name(&name.get()));
}
}
}
NamespaceDecl(name, body) => {
if let Node::EmptyBody = *body {
if let Some(name) = qualified_name("", *name) {
acc.0 = name;
}
} else {
let name = if let Ignored = *name {
Some(acc.0.clone())
} else {
qualified_name(&acc.0, *name)
};
if let Some(name) = name {
acc.1 = collect((name, acc.1), *body).1;
}
}
}
FileAttributeSpecification(attributes) => {
acc.1.file_attributes = attributes_into_facts(&acc.0, *attributes);
}
_ => (),
};
acc
}
#[cfg(test)]
mod tests {
use super::*;
use hhbc_string_utils_rust::without_xhp_mangling;
#[test]
fn xhp_mangling() {
assert_eq!(mangle_xhp_id(":foo".into()), String::from("xhp_foo"));
assert_eq!(mangle_xhp_id("with-dash".into()), String::from("with_dash"));
assert_eq!(
mangle_xhp_id("test:colon".into()),
String::from("test__colon")
);
assert_eq!(
mangle_xhp_id(":a:all-in-one:example".into()),
String::from("xhp_a__all_in_one__example")
);
}
#[test]
fn xhp_mangling_control_allows_nesting() {
let name = String::from(":no:mangling");
without_xhp_mangling(|| {
assert_eq!(mangle_xhp_id(name.clone()), name);
without_xhp_mangling(|| {
assert_eq!(mangle_xhp_id(name.clone()), name);
});
assert_eq!(mangle_xhp_id(name.clone()), name);
});
assert_ne!(mangle_xhp_id(name.clone()), name);
}
}
| 36.992991 | 98 | 0.501926 |
1a96f32f17d1b431a64cc6640ead710911285f7c | 271 | #![feature(type_alias_impl_trait)]
#![allow(dead_code)]
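// Minimized compiler-bug repro (kept as-is): it conjures a value of the opaque
// alias `Bug` with `mem::transmute` inside a `const` item, then calls it.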
type Bug<T, U> = impl Fn(T) -> U + Copy;
const CONST_BUG: Bug<u8, ()> = unsafe { std::mem::transmute(|_: u8| ()) };
fn make_bug<T, U: From<T>>() -> Bug<T, U> {
|x| x.into()
}
fn main() {
CONST_BUG(0);
}
| 18.066667 | 74 | 0.553506 |
ff7bbc2c6de18e4d28c1f0bfbaf191dfabe0e12e | 5,928 | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::S6FCR {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct FEIER {
bits: bool,
}
impl FEIER {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct FSR {
bits: u8,
}
impl FSR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct DMDISR {
bits: bool,
}
impl DMDISR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct FTHR {
bits: u8,
}
impl FTHR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _FEIEW<'a> {
w: &'a mut W,
}
impl<'a> _FEIEW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 7;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _DMDISW<'a> {
w: &'a mut W,
}
impl<'a> _DMDISW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 2;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _FTHW<'a> {
w: &'a mut W,
}
impl<'a> _FTHW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 3;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bit 7 - FIFO error interrupt enable"]
#[inline]
pub fn feie(&self) -> FEIER {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 7;
((self.bits >> OFFSET) & MASK as u32) != 0
};
FEIER { bits }
}
#[doc = "Bits 3:5 - FIFO status"]
#[inline]
pub fn fs(&self) -> FSR {
let bits = {
const MASK: u8 = 7;
const OFFSET: u8 = 3;
((self.bits >> OFFSET) & MASK as u32) as u8
};
FSR { bits }
}
#[doc = "Bit 2 - Direct mode disable"]
#[inline]
pub fn dmdis(&self) -> DMDISR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 2;
((self.bits >> OFFSET) & MASK as u32) != 0
};
DMDISR { bits }
}
#[doc = "Bits 0:1 - FIFO threshold selection"]
#[inline]
pub fn fth(&self) -> FTHR {
let bits = {
const MASK: u8 = 3;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u8
};
FTHR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 33 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bit 7 - FIFO error interrupt enable"]
#[inline]
pub fn feie(&mut self) -> _FEIEW {
_FEIEW { w: self }
}
#[doc = "Bit 2 - Direct mode disable"]
#[inline]
pub fn dmdis(&mut self) -> _DMDISW {
_DMDISW { w: self }
}
#[doc = "Bits 0:1 - FIFO threshold selection"]
#[inline]
pub fn fth(&mut self) -> _FTHW {
_FTHW { w: self }
}
}
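// Illustrative usage sketch (not generated by svd2rust): a typical
// read-modify-write on this FIFO control register, assuming `dma2.s6fcr`
// is an instance of `S6FCR`:
//
//     dma2.s6fcr.modify(|_, w| unsafe { w.fth().bits(0b11) }.dmdis().set_bit());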
| 24.195918 | 59 | 0.487517 |
cc38e1b110e5aa62367ff99af37518592cc091a9 | 2,178 | /*
* OpenGL Shaders
*/
pub const VERTEX_SHADER: &str = r#"
#version 100
uniform mat3 matrix;
uniform bool invert_y;
attribute vec2 vert;
attribute vec2 tex_coords;
attribute vec4 position;
varying vec2 v_tex_coords;
mat2 scale(vec2 scale_vec){
return mat2(
scale_vec.x, 0.0,
0.0, scale_vec.y
);
}
void main() {
if (invert_y) {
v_tex_coords = vec2(tex_coords.x, 1.0 - tex_coords.y);
} else {
v_tex_coords = tex_coords;
}
vec2 transform_translation = position.xy;
vec2 transform_scale = position.zw;
v_tex_coords = (vec3((tex_coords * scale(transform_scale)) + transform_translation, 1.0)).xy;
gl_Position = vec4(matrix * vec3((vert * scale(transform_scale)) + transform_translation, 1.0), 1.0);
}
"#;
pub const FRAGMENT_COUNT: usize = 3;
pub const FRAGMENT_SHADER_ABGR: &str = r#"
#version 100
precision mediump float;
uniform sampler2D tex;
uniform float alpha;
varying vec2 v_tex_coords;
void main() {
gl_FragColor = texture2D(tex, v_tex_coords) * alpha;
}
"#;
pub const FRAGMENT_SHADER_XBGR: &str = r#"
#version 100
precision mediump float;
uniform sampler2D tex;
uniform float alpha;
varying vec2 v_tex_coords;
void main() {
gl_FragColor = vec4(texture2D(tex, v_tex_coords).rgb, 1.0) * alpha;
}
"#;
pub const FRAGMENT_SHADER_EXTERNAL: &str = r#"
#version 100
#extension GL_OES_EGL_image_external : require
precision mediump float;
uniform samplerExternalOES tex;
uniform float alpha;
varying vec2 v_tex_coords;
void main() {
gl_FragColor = texture2D(tex, v_tex_coords) * alpha;
}
"#;
pub const VERTEX_SHADER_SOLID: &str = r#"
#version 100
uniform mat3 matrix;
attribute vec2 vert;
attribute vec4 position;
mat2 scale(vec2 scale_vec){
return mat2(
scale_vec.x, 0.0,
0.0, scale_vec.y
);
}
void main() {
vec2 transform_translation = position.xy;
vec2 transform_scale = position.zw;
gl_Position = vec4(matrix * vec3((vert * scale(transform_scale)) + transform_translation, 1.0), 1.0);
}
"#;
pub const FRAGMENT_SHADER_SOLID: &str = r#"
#version 100
precision mediump float;
uniform vec4 color;
void main() {
gl_FragColor = color;
}
"#;
| 19.981651 | 105 | 0.699725 |
ef021cb7d5a4f416eb6fb96cabae79fced0bf802 | 4,986 | #![allow(non_snake_case)]
/*
This file is part of Curv library
Copyright 2018 by Kzen Networks
(https://github.com/KZen-networks/curv)
License MIT: https://github.com/KZen-networks/curv/blob/master/LICENSE
*/
#![no_std]
use std::prelude::v1::*;
use serde::{Deserialize, Serialize};
use zeroize::Zeroize;
use super::ProofError;
use crate::cryptographic_primitives::hashing::hash_sha256::HSha256;
use crate::cryptographic_primitives::hashing::traits::Hash;
use crate::elliptic::curves::traits::*;
/// This is a proof of knowledge that a pair of group elements {D, E}
/// form a valid homomorphic ElGamal encryption ("in the exponent") using public key Y.
/// (HEG is defined in B. Schoenmakers and P. Tuyls. Practical Two-Party Computation Based on the Conditional Gate)
/// Specifically, the witness is ω = (x, r), the statement is δ = (G, Y, Q, D, E).
/// The relation R outputs 1 if D = xG + rY, E = rG, Q = xG
///
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct HomoELGamalDlogProof<P: ECPoint> {
pub A1: P,
pub A2: P,
pub A3: P,
pub z1: P::Scalar,
pub z2: P::Scalar,
}
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct HomoElGamalDlogWitness<S: ECScalar> {
pub r: S,
pub x: S,
}
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct HomoElGamalDlogStatement<P: ECPoint> {
pub G: P,
pub Y: P,
pub Q: P,
pub D: P,
pub E: P,
}
impl<P> HomoELGamalDlogProof<P>
where
P: ECPoint + Clone,
P::Scalar: Zeroize + Clone,
{
pub fn prove(
w: &HomoElGamalDlogWitness<P::Scalar>,
delta: &HomoElGamalDlogStatement<P>,
) -> HomoELGamalDlogProof<P> {
let mut s1: P::Scalar = ECScalar::new_random();
let mut s2: P::Scalar = ECScalar::new_random();
let A1 = delta.G.clone() * s1.clone();
let A2 = delta.Y.clone() * s2.clone();
let A3 = delta.G.clone() * s2.clone();
let e =
HSha256::create_hash_from_ge(&[&A1, &A2, &A3, &delta.G, &delta.Y, &delta.D, &delta.E]);
let z1 = s1.clone() + e.clone() * w.x.clone();
let z2 = s2.clone() + e * w.r.clone();
s1.zeroize();
s2.zeroize();
HomoELGamalDlogProof { A1, A2, A3, z1, z2 }
}
pub fn verify(&self, delta: &HomoElGamalDlogStatement<P>) -> Result<(), ProofError> {
let e = HSha256::create_hash_from_ge(&[
&self.A1, &self.A2, &self.A3, &delta.G, &delta.Y, &delta.D, &delta.E,
]);
let z1G = delta.G.clone() * self.z1.clone();
let z2Y = delta.Y.clone() * self.z2.clone();
let z2G = delta.G.clone() * self.z2.clone();
let A1_plus_eQ = self.A1.clone() + delta.Q.clone() * e.clone();
let A3_plus_eE = self.A3.clone() + delta.E.clone() * e.clone();
let D_minus_Q = delta.D.sub_point(&delta.Q.get_element());
let A2_plus_eDmQ = self.A2.clone() + D_minus_Q * e;
if z1G == A1_plus_eQ && z2G == A3_plus_eE && z2Y == A2_plus_eDmQ {
Ok(())
} else {
Err(ProofError)
}
}
}
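// Why verification works (illustrative recap): with e = H(A1, A2, A3, G, Y, D, E),
//   z1*G = (s1 + e*x)*G = A1 + e*Q
//   z2*G = (s2 + e*r)*G = A3 + e*E
//   z2*Y = (s2 + e*r)*Y = A2 + e*(D - Q)   because D - Q = r*Y
// which are exactly the three equalities `verify` checks.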
#[cfg(test)]
mod tests {
use crate::cryptographic_primitives::proofs::sigma_correct_homomorphic_elgamal_encryption_of_dlog::*;
use crate::test_for_all_curves;
test_for_all_curves!(test_correct_homo_elgamal);
fn test_correct_homo_elgamal<P>()
where
P: ECPoint + Clone,
P::Scalar: Zeroize + Clone,
{
let witness = HomoElGamalDlogWitness::<P::Scalar> {
r: ECScalar::new_random(),
x: ECScalar::new_random(),
};
let G: P = ECPoint::generator();
let y: P::Scalar = ECScalar::new_random();
let Y = G.clone() * y;
let D = G.clone() * witness.x.clone() + Y.clone() * witness.r.clone();
let E = G.clone() * witness.r.clone();
let Q = G.clone() * witness.x.clone();
let delta = HomoElGamalDlogStatement { G, Y, Q, D, E };
let proof = HomoELGamalDlogProof::prove(&witness, &delta);
assert!(proof.verify(&delta).is_ok());
}
// TODO: add more fail scenarios
test_for_all_curves!(
#[should_panic]
test_wrong_homo_elgamal
);
fn test_wrong_homo_elgamal<P>()
where
P: ECPoint + Clone,
P::Scalar: Zeroize + Clone,
{
// test for Q = (x+1)G
let witness = HomoElGamalDlogWitness::<P::Scalar> {
r: ECScalar::new_random(),
x: ECScalar::new_random(),
};
let G: P = ECPoint::generator();
let y: P::Scalar = ECScalar::new_random();
let Y = G.clone() * y;
let D = G.clone() * witness.x.clone() + Y.clone() * witness.r.clone();
let E = G.clone() * witness.r.clone() + G.clone();
let Q = G.clone() * witness.x.clone() + G.clone();
let delta = HomoElGamalDlogStatement { G, Y, Q, D, E };
let proof = HomoELGamalDlogProof::prove(&witness, &delta);
assert!(proof.verify(&delta).is_ok());
}
}
| 35.112676 | 115 | 0.591657 |
d9a1b03ff660afd8326e969e6e08c9ae42b730e1 | 34,708 | pub const WAVELENGTHS: [i16; 128] = [
1002, 946, 893, 843, 795, 751, 709, 669, 631, 596, 562, 531, 501, 473, 446, 421, 398, 375, 354,
334, 316, 298, 281, 265, 250, 236, 223, 211, 199, 188, 177, 167, 158, 149, 141, 133, 125, 118,
112, 105, 99, 94, 89, 84, 79, 74, 70, 66, 63, 59, 56, 53, 50, 47, 44, 42, 39, 37, 35, 33, 31,
30, 28, 26, 25, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 12, 11, 10, 10, 9, 9, 8, 8, 7,
7, 7, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
];
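// Successive entries shrink by roughly a factor of 2^(1/12) (one semitone) and
// halve every twelve entries (one octave), so these are presumably per-MIDI-note
// period lengths, indexed by note number.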
pub const NOTES: [i16; 8192] = [
-32768, -93, -81, -74, -69, -66, -62, -60, -57, -55, -54, -52, -50, -49, -48, -46, -45, -44,
-43, -42, -42, -41, -40, -39, -38, -38, -37, -36, -36, -35, -34, -34, -33, -33, -32, -32, -31,
-31, -30, -30, -30, -29, -29, -28, -28, -27, -27, -27, -26, -26, -26, -25, -25, -25, -24, -24,
-24, -23, -23, -23, -22, -22, -22, -22, -21, -21, -21, -21, -20, -20, -20, -20, -19, -19, -19,
-19, -18, -18, -18, -18, -18, -17, -17, -17, -17, -16, -16, -16, -16, -16, -15, -15, -15, -15,
-15, -15, -14, -14, -14, -14, -14, -13, -13, -13, -13, -13, -13, -12, -12, -12, -12, -12, -12,
-12, -11, -11, -11, -11, -11, -11, -10, -10, -10, -10, -10, -10, -10, -10, -9, -9, -9, -9, -9,
-9, -9, -8, -8, -8, -8, -8, -8, -8, -8, -7, -7, -7, -7, -7, -7, -7, -7, -7, -6, -6, -6, -6, -6,
-6, -6, -6, -6, -5, -5, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3,
-3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 18,
18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21,
21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
22, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31,
31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31,
31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31,
31, 31, 31, 31, 31, 31, 31, 31, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 33, 33, 33, 33, 33, 33, 33, 33,
33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33,
33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33,
33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33,
33, 33, 33, 33, 33, 33, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34,
34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34,
34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34,
34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34,
35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35,
35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35,
35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35,
35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35,
36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
36, 36, 36, 36, 36, 36, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, 38,
38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38,
38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38,
38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38,
38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38,
38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 41, 41, 41,
41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 44, 44, 44,
44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45,
45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45,
45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45,
45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45,
45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45,
45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45,
45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45,
45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 46, 46, 46, 46, 46, 46, 46,
46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46,
46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46,
46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46,
46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46,
46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46,
46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46,
46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46,
46, 46, 46, 46, 46, 46, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
47, 47, 47, 47, 47, 47, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51,
51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51,
51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51,
51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51,
51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51,
51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51,
51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51,
51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51,
51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51,
51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51,
51, 51, 51, 51, 51, 51, 51, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54,
54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54,
54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54,
54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54,
54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54,
54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54,
54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54,
54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54,
54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54,
54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54,
54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54,
54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54,
54, 54, 54, 54, 54, 54, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,
55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,
55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,
55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,
55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,
55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,
55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,
55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,
55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,
55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,
55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,
55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,
55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 56,
56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57,
57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57,
57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57,
57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57,
57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57,
57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57,
57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57,
57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57,
57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57,
57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57,
57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57,
57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57,
57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57,
57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57,
57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 58, 58, 58, 58, 58, 58, 58, 58,
58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 59, 59, 59, 59, 59, 59,
59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59,
59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59,
59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59,
59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59,
59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59,
59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59,
59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59,
59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59,
59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59,
59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59,
59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59,
59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59,
59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59,
59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59,
59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59,
59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 61, 61, 61, 61, 61, 61, 61,
61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61,
61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61,
61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61,
61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61,
61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61,
61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61,
61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61,
61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61,
61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61,
61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61,
61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61,
61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61,
61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61,
61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61,
61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61,
61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61,
61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61,
61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62,
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 63, 63, 63, 63, 63, 63, 63, 63,
63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63,
63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63,
63, 63,
];
| 98.045198 | 99 | 0.473349 |
89fc6160e321f5f3855cb52dd1dd241dffb05521 | 711 | // example taken from doc for PushSubscription::drain
//
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let sub_name = format!("drain_{}", rand::random::<u32>());
let client = nats_aflowt::connect("127.0.0.1:14222").await?;
let context = nats_aflowt::jetstream::new(client);
context.add_stream(sub_name.as_str()).await?;
    let mut subscription = context.subscribe(&sub_name).await?; // subscribe to the subject we publish to below
context.publish(&sub_name, "foo").await?;
context.publish(&sub_name, "bar").await?;
context.publish(&sub_name, "baz").await?;
subscription.drain().await?;
    assert!(subscription.next().await.is_none(), "expected no more messages after drain");
println!("done");
Ok(())
}
| 33.857143 | 71 | 0.654008 |
8f54666b28ff45166a5bc91a2ffee96f08663632 | 7,193 | use super::AbstractMut;
use crate::component::Component;
use crate::entity_id::EntityId;
use crate::not::Not;
use crate::r#mut::Mut;
use crate::sparse_set::{FullRawWindowMut, SparseSet};
use crate::track;
use crate::tracking::InsertedOrModified;
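// These impls drive iteration over `!inserted_or_modified(...)` views:
// `indices_of` yields a dense index only when the entity's tracking flags say
// the component was *not* inserted/modified (per the set's tracking mode), so
// matching-but-flagged entities are skipped.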
impl<'tmp, T: Component<Tracking = track::Insertion>> AbstractMut
for Not<InsertedOrModified<&'tmp SparseSet<T, track::Insertion>>>
{
type Out = &'tmp T;
type Index = usize;
#[inline]
unsafe fn get_data(&self, index: usize) -> Self::Out {
self.0.get_data(index)
}
#[inline]
unsafe fn get_datas(&self, index: Self::Index) -> Self::Out {
self.0.get_datas(index)
}
#[inline]
fn indices_of(&self, entity_id: EntityId, _: usize, _: u16) -> Option<Self::Index> {
if let Some(index) = self.0 .0.index_of(entity_id) {
let id = unsafe { *self.0 .0.dense.get_unchecked(index) };
if !id.is_inserted() {
Some(index)
} else {
None
}
} else {
None
}
}
#[inline]
unsafe fn indices_of_unchecked(
&self,
entity_id: EntityId,
index: usize,
mask: u16,
) -> Self::Index {
self.0.indices_of_unchecked(entity_id, index, mask)
}
#[inline]
unsafe fn get_id(&self, index: usize) -> EntityId {
self.0.get_id(index)
}
}
impl<'tmp, T: Component<Tracking = track::Modification>> AbstractMut
for Not<InsertedOrModified<&'tmp SparseSet<T, track::Modification>>>
{
type Out = &'tmp T;
type Index = usize;
#[inline]
unsafe fn get_data(&self, index: usize) -> Self::Out {
self.0.get_data(index)
}
#[inline]
unsafe fn get_datas(&self, index: Self::Index) -> Self::Out {
self.0.get_datas(index)
}
#[inline]
fn indices_of(&self, entity_id: EntityId, _: usize, _: u16) -> Option<Self::Index> {
if let Some(index) = self.0 .0.index_of(entity_id) {
let id = unsafe { *self.0 .0.dense.get_unchecked(index) };
if !id.is_modified() {
Some(index)
} else {
None
}
} else {
None
}
}
#[inline]
unsafe fn indices_of_unchecked(
&self,
entity_id: EntityId,
index: usize,
mask: u16,
) -> Self::Index {
self.0.indices_of_unchecked(entity_id, index, mask)
}
#[inline]
unsafe fn get_id(&self, index: usize) -> EntityId {
self.0.get_id(index)
}
}
impl<'tmp, T: Component<Tracking = track::All>> AbstractMut
for Not<InsertedOrModified<&'tmp SparseSet<T, track::All>>>
{
type Out = &'tmp T;
type Index = usize;
#[inline]
unsafe fn get_data(&self, index: usize) -> Self::Out {
self.0.get_data(index)
}
#[inline]
unsafe fn get_datas(&self, index: Self::Index) -> Self::Out {
self.0.get_datas(index)
}
#[inline]
fn indices_of(&self, entity_id: EntityId, _: usize, _: u16) -> Option<Self::Index> {
if let Some(index) = self.0 .0.index_of(entity_id) {
let id = unsafe { *self.0 .0.dense.get_unchecked(index) };
if !id.is_inserted() && !id.is_modified() {
Some(index)
} else {
None
}
} else {
None
}
}
#[inline]
unsafe fn indices_of_unchecked(
&self,
entity_id: EntityId,
index: usize,
mask: u16,
) -> Self::Index {
self.0.indices_of_unchecked(entity_id, index, mask)
}
#[inline]
unsafe fn get_id(&self, index: usize) -> EntityId {
self.0.get_id(index)
}
}
impl<'tmp, T: Component<Tracking = track::Insertion>> AbstractMut
for Not<InsertedOrModified<FullRawWindowMut<'tmp, T, track::Insertion>>>
{
type Out = &'tmp mut T;
type Index = usize;
#[inline]
unsafe fn get_data(&self, index: usize) -> Self::Out {
self.0.get_data(index)
}
#[inline]
unsafe fn get_datas(&self, index: Self::Index) -> Self::Out {
self.0.get_datas(index)
}
#[inline]
fn indices_of(&self, entity_id: EntityId, _: usize, _: u16) -> Option<Self::Index> {
if let Some(index) = self.0 .0.index_of(entity_id) {
let id = unsafe { *self.0 .0.dense.add(index) };
if !id.is_inserted() {
Some(index)
} else {
None
}
} else {
None
}
}
#[inline]
unsafe fn indices_of_unchecked(
&self,
entity_id: EntityId,
index: usize,
mask: u16,
) -> Self::Index {
self.0.indices_of_unchecked(entity_id, index, mask)
}
#[inline]
unsafe fn get_id(&self, index: usize) -> EntityId {
self.0.get_id(index)
}
}
impl<'tmp, T: Component<Tracking = track::Modification>> AbstractMut
for Not<InsertedOrModified<FullRawWindowMut<'tmp, T, track::Modification>>>
{
type Out = Mut<'tmp, T>;
type Index = usize;
#[inline]
unsafe fn get_data(&self, index: usize) -> Self::Out {
self.0.get_data(index)
}
#[inline]
unsafe fn get_datas(&self, index: Self::Index) -> Self::Out {
self.0.get_datas(index)
}
#[inline]
fn indices_of(&self, entity_id: EntityId, _: usize, _: u16) -> Option<Self::Index> {
if let Some(index) = self.0 .0.index_of(entity_id) {
let id = unsafe { *self.0 .0.dense.add(index) };
if !id.is_modified() {
Some(index)
} else {
None
}
} else {
None
}
}
#[inline]
unsafe fn indices_of_unchecked(
&self,
entity_id: EntityId,
index: usize,
mask: u16,
) -> Self::Index {
self.0.indices_of_unchecked(entity_id, index, mask)
}
#[inline]
unsafe fn get_id(&self, index: usize) -> EntityId {
self.0.get_id(index)
}
}
impl<'tmp, T: Component<Tracking = track::All>> AbstractMut
for Not<InsertedOrModified<FullRawWindowMut<'tmp, T, track::All>>>
{
type Out = Mut<'tmp, T>;
type Index = usize;
#[inline]
unsafe fn get_data(&self, index: usize) -> Self::Out {
self.0.get_data(index)
}
#[inline]
unsafe fn get_datas(&self, index: Self::Index) -> Self::Out {
self.0.get_datas(index)
}
#[inline]
fn indices_of(&self, entity_id: EntityId, _: usize, _: u16) -> Option<Self::Index> {
if let Some(index) = self.0 .0.index_of(entity_id) {
let id = unsafe { *self.0 .0.dense.add(index) };
if !id.is_inserted() && !id.is_modified() {
Some(index)
} else {
None
}
} else {
None
}
}
#[inline]
unsafe fn indices_of_unchecked(
&self,
entity_id: EntityId,
index: usize,
mask: u16,
) -> Self::Index {
self.0.indices_of_unchecked(entity_id, index, mask)
}
#[inline]
unsafe fn get_id(&self, index: usize) -> EntityId {
self.0.get_id(index)
}
}
| 26.940075 | 88 | 0.542889 |
bb8dee403ab6d0f090a45c6c176e50f0425a1395 | 31,799 | //!
//! Defines ValueFormat for formatting related issues
//!
//! ```
//! use spreadsheet_ods::{ValueFormat, ValueType};
//! use spreadsheet_ods::format::FormatNumberStyle;
//!
//! let mut v = ValueFormat::new_with_name("dt0", ValueType::DateTime);
//! v.push_day(FormatNumberStyle::Long);
//! v.push_text(".");
//! v.push_month(FormatNumberStyle::Long, false);
//! v.push_text(".");
//! v.push_year(FormatNumberStyle::Long);
//! v.push_text(" ");
//! v.push_hours(FormatNumberStyle::Long);
//! v.push_text(":");
//! v.push_minutes(FormatNumberStyle::Long);
//! v.push_text(":");
//! v.push_seconds(FormatNumberStyle::Long);
//!
//! let mut v = ValueFormat::new_with_name("n3", ValueType::Number);
//! v.push_number(3, false);
//! ```
//! The output formatting is a rough approximation of what is possible with
//! format! and chrono::format. In particular, there is no i18n support. On
//! the other hand, LibreOffice applies the formatting rules itself when
//! opening the spreadsheet, so this is rarely noticeable.
//!
use crate::attrmap2::AttrMap2;
use crate::style::stylemap::StyleMap;
use crate::style::units::{
FontStyle, FontWeight, Length, LineMode, LineStyle, LineType, LineWidth, TextPosition,
TextRelief, TextTransform,
};
use crate::style::{
color_string, percent_string, shadow_string, StyleOrigin, StyleUse, TextStyleRef,
};
use crate::ValueType;
use chrono::Duration;
use chrono::NaiveDateTime;
use color::Rgb;
use std::fmt::{Display, Formatter};
/// Error type for any formatting errors.
#[derive(Debug)]
#[allow(missing_docs)]
pub enum ValueFormatError {
Format(String),
NaN,
}
impl Display for ValueFormatError {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
match self {
ValueFormatError::Format(s) => write!(f, "{}", s)?,
ValueFormatError::NaN => write!(f, "Digit expected")?,
}
Ok(())
}
}
impl std::error::Error for ValueFormatError {}
style_ref!(ValueFormatRef);
/// Actual textual formatting of values.
#[derive(Debug, Clone)]
pub struct ValueFormat {
/// Name
name: String,
country: Option<String>,
language: Option<String>,
script: Option<String>,
/// Value type
v_type: ValueType,
/// Origin information.
origin: StyleOrigin,
/// Usage of this style.
styleuse: StyleUse,
/// Properties of the format.
attr: AttrMap2,
/// Cell text styles
textstyle: AttrMap2,
/// Parts of the format.
parts: Vec<FormatPart>,
/// Style map data.
stylemaps: Option<Vec<StyleMap>>,
}
impl Default for ValueFormat {
fn default() -> Self {
ValueFormat::new()
}
}
impl ValueFormat {
/// New, empty.
pub fn new() -> Self {
ValueFormat {
name: String::from(""),
country: None,
language: None,
script: None,
v_type: ValueType::Text,
origin: StyleOrigin::Styles,
styleuse: StyleUse::Default,
attr: Default::default(),
textstyle: Default::default(),
parts: Default::default(),
stylemaps: None,
}
}
/// New, with name.
pub fn new_with_name<S: Into<String>>(name: S, value_type: ValueType) -> Self {
ValueFormat {
name: name.into(),
country: None,
language: None,
script: None,
v_type: value_type,
origin: StyleOrigin::Styles,
styleuse: StyleUse::Default,
attr: Default::default(),
textstyle: Default::default(),
parts: Default::default(),
stylemaps: None,
}
}
/// Returns a reference name for this value format.
pub fn format_ref(&self) -> ValueFormatRef {
ValueFormatRef::from(self.name().as_str())
}
/// Sets the name.
pub fn set_name<S: Into<String>>(&mut self, name: S) {
self.name = name.into();
}
/// Returns the name.
pub fn name(&self) -> &String {
&self.name
}
/// Sets the country.
pub fn set_country<S: Into<String>>(&mut self, country: S) {
self.country = Some(country.into());
}
/// Country
pub fn country(&self) -> Option<&String> {
self.country.as_ref()
}
/// Sets the language.
pub fn set_language<S: Into<String>>(&mut self, language: S) {
self.language = Some(language.into());
}
/// Language
pub fn language(&self) -> Option<&String> {
self.language.as_ref()
}
/// Sets the Script.
pub fn set_script<S: Into<String>>(&mut self, script: S) {
self.script = Some(script.into());
}
/// Script
pub fn script(&self) -> Option<&String> {
self.script.as_ref()
}
/// Sets the value type.
pub fn set_value_type(&mut self, value_type: ValueType) {
self.v_type = value_type;
}
/// Returns the value type.
pub fn value_type(&self) -> ValueType {
self.v_type
}
/// Sets the origin.
pub fn set_origin(&mut self, origin: StyleOrigin) {
self.origin = origin;
}
/// Returns the origin.
pub fn origin(&self) -> StyleOrigin {
self.origin
}
/// Style usage.
pub fn set_styleuse(&mut self, styleuse: StyleUse) {
self.styleuse = styleuse;
}
/// Returns the usage.
pub fn styleuse(&self) -> StyleUse {
self.styleuse
}
pub(crate) fn attrmap(&self) -> &AttrMap2 {
&self.attr
}
pub(crate) fn attrmap_mut(&mut self) -> &mut AttrMap2 {
&mut self.attr
}
/// Text style attributes.
pub fn textstyle(&self) -> &AttrMap2 {
&self.textstyle
}
/// Text style attributes.
pub fn textstyle_mut(&mut self) -> &mut AttrMap2 {
&mut self.textstyle
}
text!(textstyle_mut);
/// Appends a format part.
pub fn push_boolean(&mut self) {
self.push_part(FormatPart::new_boolean());
}
/// Appends a format part.
pub fn push_number(&mut self, decimal: u8, grouping: bool) {
self.push_part(FormatPart::new_number(decimal, grouping));
}
/// Appends a format part.
pub fn push_number_fix(&mut self, decimal: u8, grouping: bool) {
self.push_part(FormatPart::new_number_fix(decimal, grouping));
}
/// Appends a format part.
pub fn push_fraction(
&mut self,
denominator: u32,
min_den_digits: u8,
min_int_digits: u8,
min_num_digits: u8,
grouping: bool,
) {
self.push_part(FormatPart::new_fraction(
denominator,
min_den_digits,
min_int_digits,
min_num_digits,
grouping,
));
}
/// Appends a format part.
pub fn push_scientific(&mut self, dec_places: u8) {
self.push_part(FormatPart::new_scientific(dec_places));
}
/// Appends a format part.
pub fn push_currency<S1, S2, S3>(&mut self, country: S1, language: S2, symbol: S3)
where
S1: Into<String>,
S2: Into<String>,
S3: Into<String>,
{
self.push_part(FormatPart::new_currency(country, language, symbol));
}
/// Appends a format part.
pub fn push_day(&mut self, number: FormatNumberStyle) {
self.push_part(FormatPart::new_day(number));
}
/// Appends a format part.
pub fn push_month(&mut self, number: FormatNumberStyle, text: bool) {
self.push_part(FormatPart::new_month(number, text));
}
/// Appends a format part.
pub fn push_year(&mut self, number: FormatNumberStyle) {
self.push_part(FormatPart::new_year(number));
}
/// Appends a format part.
pub fn push_era(&mut self, number: FormatNumberStyle, calendar: FormatCalendarStyle) {
self.push_part(FormatPart::new_era(number, calendar));
}
/// Appends a format part.
pub fn push_day_of_week(&mut self, number: FormatNumberStyle, calendar: FormatCalendarStyle) {
self.push_part(FormatPart::new_day_of_week(number, calendar));
}
/// Appends a format part.
pub fn push_week_of_year(&mut self, calendar: FormatCalendarStyle) {
self.push_part(FormatPart::new_week_of_year(calendar));
}
/// Appends a format part.
pub fn push_quarter(&mut self, number: FormatNumberStyle, calendar: FormatCalendarStyle) {
self.push_part(FormatPart::new_quarter(number, calendar));
}
/// Appends a format part.
pub fn push_hours(&mut self, number: FormatNumberStyle) {
self.push_part(FormatPart::new_hours(number));
}
/// Appends a format part.
pub fn push_minutes(&mut self, number: FormatNumberStyle) {
self.push_part(FormatPart::new_minutes(number));
}
/// Appends a format part.
pub fn push_seconds(&mut self, number: FormatNumberStyle) {
self.push_part(FormatPart::new_seconds(number));
}
/// Appends a format part.
pub fn push_am_pm(&mut self) {
self.push_part(FormatPart::new_am_pm());
}
/// Appends a format part.
pub fn push_embedded_text(&mut self, position: u8) {
self.push_part(FormatPart::new_embedded_text(position));
}
/// Appends a format part.
pub fn push_text<S: Into<String>>(&mut self, text: S) {
self.push_part(FormatPart::new_text(text));
}
/// Appends a format part.
pub fn push_text_content(&mut self) {
self.push_part(FormatPart::new_text_content());
}
/// Adds a format part.
pub fn push_part(&mut self, part: FormatPart) {
self.parts.push(part);
}
/// Adds all format parts.
pub fn push_parts(&mut self, partvec: &mut Vec<FormatPart>) {
self.parts.append(partvec);
}
/// Returns the parts.
pub fn parts(&self) -> &Vec<FormatPart> {
&self.parts
}
/// Returns the mutable parts.
pub fn parts_mut(&mut self) -> &mut Vec<FormatPart> {
&mut self.parts
}
/// Adds a stylemap.
pub fn push_stylemap(&mut self, stylemap: StyleMap) {
self.stylemaps.get_or_insert_with(Vec::new).push(stylemap);
}
/// Returns the stylemaps
pub fn stylemaps(&self) -> Option<&Vec<StyleMap>> {
self.stylemaps.as_ref()
}
/// Returns the mutable stylemap.
pub fn stylemaps_mut(&mut self) -> &mut Vec<StyleMap> {
self.stylemaps.get_or_insert_with(Vec::new)
}
/// Tries to format.
/// If there are no matching parts, does nothing.
pub fn format_boolean(&self, b: bool) -> String {
let mut buf = String::new();
for p in &self.parts {
p.format_boolean(&mut buf, b);
}
buf
}
/// Tries to format.
/// If there are no matching parts, does nothing.
pub fn format_float(&self, f: f64) -> String {
let mut buf = String::new();
for p in &self.parts {
p.format_float(&mut buf, f);
}
buf
}
/// Tries to format.
/// If there are no matching parts, does nothing.
pub fn format_str<'a, S: Into<&'a str>>(&self, s: S) -> String {
let mut buf = String::new();
let s = s.into();
for p in &self.parts {
p.format_str(&mut buf, s);
}
buf
}
/// Tries to format.
/// If there are no matching parts, does nothing.
    /// Should work reasonably. Don't ask me about other calendars.
pub fn format_datetime(&self, d: &NaiveDateTime) -> String {
let mut buf = String::new();
let h12 = self
.parts
.iter()
.any(|v| v.part_type == FormatPartType::AmPm);
for p in &self.parts {
p.format_datetime(&mut buf, d, h12);
}
buf
}
/// Tries to format. Should work reasonably.
/// If there are no matching parts, does nothing.
pub fn format_time_duration(&self, d: &Duration) -> String {
let mut buf = String::new();
for p in &self.parts {
p.format_time_duration(&mut buf, d);
}
buf
}
}
/// Identifies the structural parts of a value format.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
#[allow(missing_docs)]
pub enum FormatPartType {
Boolean,
Number,
Fraction,
Scientific,
CurrencySymbol,
Day,
Month,
Year,
Era,
DayOfWeek,
WeekOfYear,
Quarter,
Hours,
Minutes,
Seconds,
AmPm,
EmbeddedText,
Text,
TextContent,
}
/// One structural part of a value format.
#[derive(Debug, Clone)]
pub struct FormatPart {
/// What kind of format part is this?
part_type: FormatPartType,
/// Properties of this part.
attr: AttrMap2,
/// Some content.
content: Option<String>,
}
/// Flag for several PartTypes.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
#[allow(missing_docs)]
pub enum FormatNumberStyle {
Short,
Long,
}
impl Display for FormatNumberStyle {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
match self {
FormatNumberStyle::Short => write!(f, "short"),
FormatNumberStyle::Long => write!(f, "long"),
}
}
}
/// Calendar types.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
#[allow(missing_docs)]
pub enum FormatCalendarStyle {
Gregorian,
Gengou,
Roc,
Hanja,
Hijri,
Jewish,
Buddhist,
}
impl Display for FormatCalendarStyle {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
match self {
FormatCalendarStyle::Gregorian => write!(f, "gregorian"),
FormatCalendarStyle::Gengou => write!(f, "gengou"),
FormatCalendarStyle::Roc => write!(f, "ROC"),
FormatCalendarStyle::Hanja => write!(f, "hanja"),
FormatCalendarStyle::Hijri => write!(f, "hijri"),
FormatCalendarStyle::Jewish => write!(f, "jewish"),
FormatCalendarStyle::Buddhist => write!(f, "buddhist"),
}
}
}
impl FormatPart {
/// New, empty
pub fn new(ftype: FormatPartType) -> Self {
FormatPart {
part_type: ftype,
attr: Default::default(),
content: None,
}
}
/// New, with string content.
pub fn new_with_content<S: Into<String>>(ftype: FormatPartType, content: S) -> Self {
FormatPart {
part_type: ftype,
attr: Default::default(),
content: Some(content.into()),
}
}
/// Boolean Part
pub fn new_boolean() -> Self {
FormatPart::new(FormatPartType::Boolean)
}
/// Number format part.
pub fn new_number(decimal: u8, grouping: bool) -> Self {
let mut p = FormatPart::new(FormatPartType::Number);
p.set_attr("number:min-integer-digits", 1.to_string());
p.set_attr("number:decimal-places", decimal.to_string());
p.set_attr("number:min-decimal-places", 0.to_string());
if grouping {
p.set_attr("number:grouping", String::from("true"));
}
p
}
/// Number format part with fixed decimal places.
pub fn new_number_fix(decimal: u8, grouping: bool) -> Self {
let mut p = Self::new(FormatPartType::Number);
p.set_attr("number:min-integer-digits", 1.to_string());
p.set_attr("number:decimal-places", decimal.to_string());
p.set_attr("number:min-decimal-places", decimal.to_string());
if grouping {
p.set_attr("number:grouping", String::from("true"));
}
p
}
/// Format as a fraction.
pub fn new_fraction(
denominator: u32,
min_den_digits: u8,
min_int_digits: u8,
min_num_digits: u8,
grouping: bool,
) -> Self {
let mut p = Self::new(FormatPartType::Fraction);
p.set_attr("number:denominator-value", denominator.to_string());
p.set_attr("number:min-denominator-digits", min_den_digits.to_string());
p.set_attr("number:min-integer-digits", min_int_digits.to_string());
p.set_attr("number:min-numerator-digits", min_num_digits.to_string());
if grouping {
p.set_attr("number:grouping", String::from("true"));
}
p
}
/// Format with scientific notation.
pub fn new_scientific(dec_places: u8) -> Self {
let mut p = Self::new(FormatPartType::Scientific);
p.set_attr("number:decimal-places", dec_places.to_string());
p
}
/// Currency symbol.
pub fn new_currency<S1, S2, S3>(country: S1, language: S2, symbol: S3) -> Self
where
S1: Into<String>,
S2: Into<String>,
S3: Into<String>,
{
let mut p = Self::new_with_content(FormatPartType::CurrencySymbol, symbol);
p.set_attr("number:country", country.into());
p.set_attr("number:language", language.into());
p
}
/// Create a part for a date.
pub fn new_day(number: FormatNumberStyle) -> Self {
let mut p = Self::new(FormatPartType::Day);
p.set_attr("number:style", number.to_string());
p
}
/// Create a part for a month.
pub fn new_month(number: FormatNumberStyle, text: bool) -> Self {
let mut p = Self::new(FormatPartType::Month);
p.set_attr("number:style", number.to_string());
p.set_attr("number:textual", text.to_string());
p
}
/// Create a part for a year.
pub fn new_year(number: FormatNumberStyle) -> Self {
let mut p = Self::new(FormatPartType::Year);
p.set_attr("number:style", number.to_string());
p
}
    /// Create a part for an era marker.
pub fn new_era(number: FormatNumberStyle, calendar: FormatCalendarStyle) -> Self {
let mut p = Self::new(FormatPartType::Era);
p.set_attr("number:style", number.to_string());
p.set_attr("number:calendar", calendar.to_string());
p
}
/// Create a part for a week day.
pub fn new_day_of_week(number: FormatNumberStyle, calendar: FormatCalendarStyle) -> Self {
let mut p = Self::new(FormatPartType::DayOfWeek);
p.set_attr("number:style", number.to_string());
p.set_attr("number:calendar", calendar.to_string());
p
}
/// Create a part for the week of year.
pub fn new_week_of_year(calendar: FormatCalendarStyle) -> Self {
let mut p = Self::new(FormatPartType::WeekOfYear);
p.set_attr("number:calendar", calendar.to_string());
p
}
/// Create a part for a quarter of a year.
pub fn new_quarter(number: FormatNumberStyle, calendar: FormatCalendarStyle) -> Self {
let mut p = Self::new(FormatPartType::Quarter);
p.set_attr("number:style", number.to_string());
p.set_attr("number:calendar", calendar.to_string());
p
}
/// Create a part for hours.
pub fn new_hours(number: FormatNumberStyle) -> Self {
let mut p = Self::new(FormatPartType::Hours);
p.set_attr("number:style", number.to_string());
p
}
/// Create a part for minutes.
pub fn new_minutes(number: FormatNumberStyle) -> Self {
let mut p = Self::new(FormatPartType::Minutes);
p.set_attr("number:style", number.to_string());
p
}
/// Create a part for seconds.
pub fn new_seconds(number: FormatNumberStyle) -> Self {
let mut p = Self::new(FormatPartType::Seconds);
p.set_attr("number:style", number.to_string());
p
}
/// Create a part for a AM/PM marker.
pub fn new_am_pm() -> Self {
Self::new(FormatPartType::AmPm)
}
/// Whatever this is for ...
pub fn new_embedded_text(position: u8) -> Self {
let mut p = Self::new(FormatPartType::EmbeddedText);
p.set_attr("number:position", position.to_string());
p
}
/// Part with fixed text.
pub fn new_text<S: Into<String>>(text: S) -> Self {
Self::new_with_content(FormatPartType::Text, text)
}
/// Whatever this is for ...
pub fn new_text_content() -> Self {
Self::new(FormatPartType::TextContent)
}
/// Sets the kind of the part.
pub fn set_part_type(&mut self, p_type: FormatPartType) {
self.part_type = p_type;
}
/// What kind of part?
pub fn part_type(&self) -> FormatPartType {
self.part_type
}
/// General attributes.
pub fn attrmap(&self) -> &AttrMap2 {
&self.attr
}
/// General attributes.
pub fn attrmap_mut(&mut self) -> &mut AttrMap2 {
&mut self.attr
}
/// Adds an attribute.
pub fn set_attr(&mut self, name: &str, value: String) {
self.attr.set_attr(name, value);
}
/// Returns a property or a default.
pub fn attr_def<'a0, 'a1, S0, S1>(&'a1 self, name: S0, default: S1) -> &'a1 str
where
S0: Into<&'a0 str>,
S1: Into<&'a1 str>,
{
if let Some(v) = self.attr.attr(name.into()) {
v
} else {
default.into()
}
}
/// Sets a textual content for this part. This is only used
/// for text and currency-symbol.
pub fn set_content<S: Into<String>>(&mut self, content: S) {
self.content = Some(content.into());
}
/// Returns the text content.
pub fn content(&self) -> Option<&String> {
self.content.as_ref()
}
/// Tries to format the given boolean, and appends the result to buf.
    /// If this part doesn't match, this does nothing.
fn format_boolean(&self, buf: &mut String, b: bool) {
match self.part_type {
FormatPartType::Boolean => {
buf.push_str(if b { "true" } else { "false" });
}
FormatPartType::Text => {
if let Some(content) = &self.content {
buf.push_str(content)
}
}
_ => {}
}
}
/// Tries to format the given float, and appends the result to buf.
    /// If this part doesn't match, this does nothing.
fn format_float(&self, buf: &mut String, f: f64) {
match self.part_type {
FormatPartType::Number => {
let dec = self.attr_def("number:decimal-places", "0").parse::<usize>();
if let Ok(dec) = dec {
buf.push_str(&format!("{:.*}", dec, f));
}
}
FormatPartType::Scientific => {
buf.push_str(&format!("{:e}", f));
}
FormatPartType::CurrencySymbol => {
if let Some(content) = &self.content {
buf.push_str(content)
}
}
FormatPartType::Text => {
if let Some(content) = &self.content {
buf.push_str(content)
}
}
_ => {}
}
}
/// Tries to format the given string, and appends the result to buf.
    /// If this part doesn't match, this does nothing.
fn format_str(&self, buf: &mut String, s: &str) {
match self.part_type {
FormatPartType::TextContent => {
buf.push_str(s);
}
FormatPartType::Text => {
if let Some(content) = &self.content {
buf.push_str(content)
}
}
_ => {}
}
}
/// Tries to format the given DateTime, and appends the result to buf.
/// Uses chrono::strftime for the implementation.
    /// If this part doesn't match, this does nothing.
#[allow(clippy::collapsible_else_if)]
fn format_datetime(&self, buf: &mut String, d: &NaiveDateTime, h12: bool) {
match self.part_type {
FormatPartType::Day => {
let is_long = self.attr_def("number:style", "") == "long";
if is_long {
buf.push_str(&d.format("%d").to_string());
} else {
buf.push_str(&d.format("%-d").to_string());
}
}
FormatPartType::Month => {
let is_long = self.attr_def("number:style", "") == "long";
let is_text = self.attr_def("number:textual", "") == "true";
if is_text {
                    if is_long {
                        buf.push_str(&d.format("%B").to_string());
                    } else {
                        buf.push_str(&d.format("%b").to_string());
                    }
} else {
if is_long {
buf.push_str(&d.format("%m").to_string());
} else {
buf.push_str(&d.format("%-m").to_string());
}
}
}
FormatPartType::Year => {
let is_long = self.attr_def("number:style", "") == "long";
if is_long {
buf.push_str(&d.format("%Y").to_string());
} else {
buf.push_str(&d.format("%y").to_string());
}
}
FormatPartType::DayOfWeek => {
let is_long = self.attr_def("number:style", "") == "long";
if is_long {
buf.push_str(&d.format("%A").to_string());
} else {
buf.push_str(&d.format("%a").to_string());
}
}
FormatPartType::WeekOfYear => {
let is_long = self.attr_def("number:style", "") == "long";
if is_long {
buf.push_str(&d.format("%W").to_string());
} else {
buf.push_str(&d.format("%-W").to_string());
}
}
FormatPartType::Hours => {
let is_long = self.attr_def("number:style", "") == "long";
if !h12 {
if is_long {
buf.push_str(&d.format("%H").to_string());
} else {
buf.push_str(&d.format("%-H").to_string());
}
} else {
if is_long {
buf.push_str(&d.format("%I").to_string());
} else {
buf.push_str(&d.format("%-I").to_string());
}
}
}
FormatPartType::Minutes => {
let is_long = self.attr_def("number:style", "") == "long";
if is_long {
buf.push_str(&d.format("%M").to_string());
} else {
buf.push_str(&d.format("%-M").to_string());
}
}
FormatPartType::Seconds => {
let is_long = self.attr_def("number:style", "") == "long";
if is_long {
buf.push_str(&d.format("%S").to_string());
} else {
buf.push_str(&d.format("%-S").to_string());
}
}
FormatPartType::AmPm => {
buf.push_str(&d.format("%p").to_string());
}
FormatPartType::Text => {
if let Some(content) = &self.content {
buf.push_str(content)
}
}
_ => {}
}
}
/// Tries to format the given Duration, and appends the result to buf.
    /// If this part doesn't match, this does nothing.
fn format_time_duration(&self, buf: &mut String, d: &Duration) {
match self.part_type {
FormatPartType::Hours => {
buf.push_str(&d.num_hours().to_string());
}
FormatPartType::Minutes => {
buf.push_str(&(d.num_minutes() % 60).to_string());
}
FormatPartType::Seconds => {
buf.push_str(&(d.num_seconds() % 60).to_string());
}
FormatPartType::Text => {
if let Some(content) = &self.content {
buf.push_str(content)
}
}
_ => {}
}
}
}
/// Creates a new number format.
pub fn create_boolean_format<S: Into<String>>(name: S) -> ValueFormat {
let mut v = ValueFormat::new_with_name(name.into(), ValueType::Boolean);
v.push_boolean();
v
}
/// Creates a new number format.
pub fn create_number_format<S: Into<String>>(name: S, decimal: u8, grouping: bool) -> ValueFormat {
let mut v = ValueFormat::new_with_name(name.into(), ValueType::Number);
v.push_number(decimal, grouping);
v
}
/// Creates a new number format with a fixed number of decimal places.
pub fn create_number_format_fixed<S: Into<String>>(
name: S,
decimal: u8,
grouping: bool,
) -> ValueFormat {
let mut v = ValueFormat::new_with_name(name.into(), ValueType::Number);
v.push_number_fix(decimal, grouping);
v
}
/// Creates a new percentage format.
pub fn create_percentage_format<S: Into<String>>(name: S, decimal: u8) -> ValueFormat {
let mut v = ValueFormat::new_with_name(name.into(), ValueType::Percentage);
v.push_number_fix(decimal, false);
v.push_text("%");
v
}
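// Usage sketch (added for illustration, not part of the original source):
// rendering a float with the percentage format built above. `format_float`
// walks the parts in order: the fixed-decimal number part, then the literal
// "%" text part.
#[cfg(test)]
mod percentage_format_sketch {
    use super::*;
    #[test]
    fn percentage_format_renders_float() {
        let v = create_percentage_format("p2", 2);
        // Two fixed decimal places, then "%".
        assert_eq!(v.format_float(12.5), "12.50%");
    }
}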
/// Creates a new currency format.
pub fn create_currency_prefix<S1, S2, S3, S4>(
name: S1,
country: S2,
language: S3,
symbol: S4,
) -> ValueFormat
where
S1: Into<String>,
S2: Into<String>,
S3: Into<String>,
S4: Into<String>,
{
let mut v = ValueFormat::new_with_name(name.into(), ValueType::Currency);
v.push_currency(country.into(), language.into(), symbol.into());
v.push_text(" ");
v.push_number_fix(2, true);
v
}
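// Usage sketch (added for illustration, not part of the original source):
// rendering a float with the prefix-currency format built above. Note that
// `format_float` only honors the decimal places; the grouping flag matters
// to the spreadsheet application, not to this renderer.
#[cfg(test)]
mod currency_format_sketch {
    use super::*;
    #[test]
    fn currency_prefix_renders_float() {
        let v = create_currency_prefix("c0", "US", "en", "$");
        // CurrencySymbol part, a literal space, then the fixed-decimal number.
        assert_eq!(v.format_float(1234.5), "$ 1234.50");
    }
}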
/// Creates a new currency format.
pub fn create_currency_suffix<S1, S2, S3, S4>(
name: S1,
country: S2,
language: S3,
symbol: S4,
) -> ValueFormat
where
S1: Into<String>,
S2: Into<String>,
S3: Into<String>,
S4: Into<String>,
{
let mut v = ValueFormat::new_with_name(name.into(), ValueType::Currency);
v.push_number_fix(2, true);
v.push_text(" ");
v.push_currency(country.into(), language.into(), symbol.into());
v
}
/// Creates a new date format D.M.Y
pub fn create_date_dmy_format<S: Into<String>>(name: S) -> ValueFormat {
let mut v = ValueFormat::new_with_name(name.into(), ValueType::DateTime);
v.push_day(FormatNumberStyle::Long);
v.push_text(".");
v.push_month(FormatNumberStyle::Long, false);
v.push_text(".");
v.push_year(FormatNumberStyle::Long);
v
}
/// Creates a new date format M/D/Y
pub fn create_date_mdy_format<S: Into<String>>(name: S) -> ValueFormat {
let mut v = ValueFormat::new_with_name(name.into(), ValueType::DateTime);
v.push_month(FormatNumberStyle::Long, false);
v.push_text("/");
v.push_day(FormatNumberStyle::Long);
v.push_text("/");
v.push_year(FormatNumberStyle::Long);
v
}
/// Creates a datetime format D.M.Y H:M:S
pub fn create_datetime_format<S: Into<String>>(name: S) -> ValueFormat {
let mut v = ValueFormat::new_with_name(name.into(), ValueType::DateTime);
v.push_day(FormatNumberStyle::Long);
v.push_text(".");
v.push_month(FormatNumberStyle::Long, false);
v.push_text(".");
v.push_year(FormatNumberStyle::Long);
v.push_text(" ");
v.push_hours(FormatNumberStyle::Long);
v.push_text(":");
v.push_minutes(FormatNumberStyle::Long);
v.push_text(":");
v.push_seconds(FormatNumberStyle::Long);
v
}
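// Usage sketch (added for illustration, not part of the original source):
// rendering a NaiveDateTime with the D.M.Y H:M:S format built above. No
// AM/PM part is present, so hours use the 24-hour clock.
#[cfg(test)]
mod datetime_format_sketch {
    use super::*;
    use chrono::NaiveDate;
    #[test]
    fn datetime_format_renders() {
        let v = create_datetime_format("dt0");
        let d = NaiveDate::from_ymd(2021, 12, 24).and_hms(17, 5, 9);
        // Long styles map to chrono's zero-padded %d/%m/%Y/%H/%M/%S.
        assert_eq!(v.format_datetime(&d), "24.12.2021 17:05:09");
    }
}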
/// Creates a new time-Duration format H:M:S
pub fn create_time_format<S: Into<String>>(name: S) -> ValueFormat {
let mut v = ValueFormat::new_with_name(name.into(), ValueType::TimeDuration);
v.push_hours(FormatNumberStyle::Long);
v.push_text(":");
v.push_minutes(FormatNumberStyle::Long);
v.push_text(":");
v.push_seconds(FormatNumberStyle::Long);
v
}
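// Usage sketch (added for illustration, not part of the original source):
// rendering a chrono Duration with the H:M:S format built above. Note that
// `format_time_duration` does not zero-pad its components.
#[cfg(test)]
mod time_format_sketch {
    use super::*;
    #[test]
    fn time_format_renders_duration() {
        let v = create_time_format("t0");
        // 3723 seconds = 1 hour, 2 minutes, 3 seconds.
        assert_eq!(v.format_time_duration(&Duration::seconds(3723)), "1:2:3");
    }
}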
| 30.141232 | 99 | 0.565238 |
14c97f83b45916eb8e934f22baaa326c7ecfae62 | 91,675 | // Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! The Ethernet protocol.
use alloc::collections::HashMap;
use alloc::collections::VecDeque;
use alloc::vec::{IntoIter, Vec};
use core::fmt::Debug;
use core::iter::FilterMap;
use core::num::NonZeroU8;
use core::slice::Iter;
use log::{debug, trace};
use net_types::ethernet::Mac;
use net_types::ip::{AddrSubnet, Ip, IpAddr, IpAddress, IpVersion, Ipv4, Ipv4Addr, Ipv6, Ipv6Addr};
use net_types::{
BroadcastAddress, LinkLocalAddr, LinkLocalAddress, MulticastAddr, MulticastAddress,
SpecifiedAddr, UnicastAddress, Witness,
};
use packet::{Buf, BufferMut, EmptyBuf, Nested, Serializer};
use packet_formats::arp::{peek_arp_types, ArpHardwareType, ArpNetworkType};
use packet_formats::ethernet::{
EtherType, EthernetFrame, EthernetFrameBuilder, EthernetFrameLengthCheck, EthernetIpExt,
};
use specialize_ip_macro::specialize_ip_address;
use crate::context::{DualStateContext, FrameContext, InstantContext, StateContext, TimerHandler};
use crate::device::arp::{
self, ArpContext, ArpDeviceIdContext, ArpFrameMetadata, ArpState, ArpTimerId,
};
use crate::device::link::LinkDevice;
use crate::device::ndp::{self, NdpContext, NdpHandler, NdpState, NdpTimerId};
use crate::device::{
AddressConfigurationType, AddressEntry, AddressError, AddressState, BufferIpDeviceContext,
DeviceIdContext, FrameDestination, IpDeviceContext, RecvIpFrameMeta, Tentative,
};
use crate::ip::gmp::igmp::{
IgmpContext, IgmpGroupState, IgmpHandler, IgmpPacketMetadata, IgmpTimerId,
};
use crate::ip::gmp::mld::{
MldContext, MldFrameMetadata, MldGroupState, MldHandler, MldReportDelay,
};
use crate::ip::gmp::{GroupJoinResult, GroupLeaveResult, MulticastGroupSet};
#[cfg(test)]
use crate::Context;
use crate::Instant;
const ETHERNET_MAX_PENDING_FRAMES: usize = 10;
impl From<Mac> for FrameDestination {
fn from(mac: Mac) -> FrameDestination {
if mac.is_broadcast() {
FrameDestination::Broadcast
} else if mac.is_multicast() {
FrameDestination::Multicast
} else {
debug_assert!(mac.is_unicast());
FrameDestination::Unicast
}
}
}
/// A shorthand for `IpDeviceContext` with all of the appropriate type arguments
/// fixed to their Ethernet values.
pub(crate) trait EthernetIpDeviceContext:
IpDeviceContext<
EthernetLinkDevice,
EthernetTimerId<<Self as DeviceIdContext<EthernetLinkDevice>>::DeviceId>,
EthernetDeviceState<<Self as InstantContext>::Instant>,
>
{
}
impl<
C: IpDeviceContext<
EthernetLinkDevice,
EthernetTimerId<<C as DeviceIdContext<EthernetLinkDevice>>::DeviceId>,
EthernetDeviceState<<C as InstantContext>::Instant>,
>,
> EthernetIpDeviceContext for C
{
}
/// A shorthand for `BufferIpDeviceContext` with all of the appropriate type
/// arguments fixed to their Ethernet values.
pub(super) trait BufferEthernetIpDeviceContext<B: BufferMut>:
BufferIpDeviceContext<
EthernetLinkDevice,
EthernetTimerId<<Self as DeviceIdContext<EthernetLinkDevice>>::DeviceId>,
EthernetDeviceState<<Self as InstantContext>::Instant>,
B,
>
{
}
impl<
B: BufferMut,
C: BufferIpDeviceContext<
EthernetLinkDevice,
EthernetTimerId<<C as DeviceIdContext<EthernetLinkDevice>>::DeviceId>,
EthernetDeviceState<<C as InstantContext>::Instant>,
B,
>,
> BufferEthernetIpDeviceContext<B> for C
{
}
impl<C: EthernetIpDeviceContext>
DualStateContext<MulticastGroupSet<Ipv4Addr, IgmpGroupState<C::Instant>>, C::Rng, C::DeviceId>
for C
{
fn get_states_with(
&self,
device: C::DeviceId,
_id1: (),
) -> (&MulticastGroupSet<Ipv4Addr, IgmpGroupState<C::Instant>>, &C::Rng) {
let (state, rng) = self.get_states_with(device, ());
(&state.ip().ipv4_multicast_groups, rng)
}
fn get_states_mut_with(
&mut self,
device: C::DeviceId,
_id1: (),
) -> (&mut MulticastGroupSet<Ipv4Addr, IgmpGroupState<C::Instant>>, &mut C::Rng) {
let (state, rng) = self.get_states_mut_with(device, ());
(&mut state.ip_mut().ipv4_multicast_groups, rng)
}
}
impl<C: EthernetIpDeviceContext>
DualStateContext<MulticastGroupSet<Ipv6Addr, MldGroupState<C::Instant>>, C::Rng, C::DeviceId>
for C
{
fn get_states_with(
&self,
device: C::DeviceId,
_id1: (),
) -> (&MulticastGroupSet<Ipv6Addr, MldGroupState<C::Instant>>, &C::Rng) {
let (state, rng) = self.get_states_with(device, ());
(&state.ip().ipv6_multicast_groups, rng)
}
fn get_states_mut_with(
&mut self,
device: C::DeviceId,
_id1: (),
) -> (&mut MulticastGroupSet<Ipv6Addr, MldGroupState<C::Instant>>, &mut C::Rng) {
let (state, rng) = self.get_states_mut_with(device, ());
(&mut state.ip_mut().ipv6_multicast_groups, rng)
}
}
impl<C: EthernetIpDeviceContext> FrameContext<EmptyBuf, IgmpPacketMetadata<C::DeviceId>> for C {
fn send_frame<S: Serializer<Buffer = EmptyBuf>>(
&mut self,
meta: IgmpPacketMetadata<C::DeviceId>,
body: S,
) -> Result<(), S> {
send_ip_frame(self, meta.device, meta.dst_ip.into_specified(), body)
}
}
impl<C: EthernetIpDeviceContext> FrameContext<EmptyBuf, MldFrameMetadata<C::DeviceId>> for C {
fn send_frame<S: Serializer<Buffer = EmptyBuf>>(
&mut self,
meta: MldFrameMetadata<C::DeviceId>,
body: S,
) -> Result<(), S> {
send_ip_frame(self, meta.device, meta.dst_ip.into_specified(), body)
}
}
impl<C: EthernetIpDeviceContext> IgmpContext<EthernetLinkDevice> for C {
fn get_ip_addr_subnet(&self, device: C::DeviceId) -> Option<AddrSubnet<Ipv4Addr>> {
get_ip_addr_subnet(self, device)
}
fn igmp_enabled(&self, device: C::DeviceId) -> bool {
self.get_state_with(device).ip().igmp_enabled
}
}
impl<C: EthernetIpDeviceContext> MldContext<EthernetLinkDevice> for C {
fn get_ipv6_link_local_addr(&self, device: C::DeviceId) -> Option<LinkLocalAddr<Ipv6Addr>> {
get_ipv6_link_local_addr(self, device)
}
fn mld_enabled(&self, device: C::DeviceId) -> bool {
self.get_state_with(device).ip().mld_enabled
}
}
/// Builder for [`EthernetDeviceState`].
pub(crate) struct EthernetDeviceStateBuilder {
mac: Mac,
mtu: u32,
ndp_configs: ndp::NdpConfigurations,
}
impl EthernetDeviceStateBuilder {
/// Create a new `EthernetDeviceStateBuilder`.
pub(crate) fn new(mac: Mac, mtu: u32) -> Self {
// TODO(joshlf): Add a minimum MTU for all Ethernet devices such that
// you cannot create an `EthernetDeviceState` with an MTU smaller than
// the minimum. The absolute minimum needs to be at least the minimum
// body size of an Ethernet frame. For IPv6-capable devices, the
// minimum needs to be higher - the IPv6 minimum MTU. The easy path is
// to simply use the IPv6 minimum MTU as the minimum in all cases,
// although we may at some point want to figure out how to configure
// devices which don't support IPv6, and allow smaller MTUs for those
// devices.
//
// A few questions:
// - How do we wire error information back up the call stack? Should
// this just return a Result or something?
Self { mac, mtu, ndp_configs: ndp::NdpConfigurations::default() }
}
/// Update the NDP configurations that will be set on the ethernet device.
pub(crate) fn set_ndp_configs(&mut self, v: ndp::NdpConfigurations) {
self.ndp_configs = v;
}
/// Build the `EthernetDeviceState` from this builder.
pub(super) fn build<I: Instant>(self) -> EthernetDeviceState<I> {
EthernetDeviceState {
mac: self.mac,
mtu: self.mtu,
hw_mtu: self.mtu,
link_multicast_groups: HashMap::new(),
ipv4_arp: ArpState::default(),
ndp: NdpState::new(self.ndp_configs),
pending_frames: HashMap::new(),
promiscuous_mode: false,
}
}
}
/// The state associated with an Ethernet device.
pub(crate) struct EthernetDeviceState<I: Instant> {
/// Mac address of the device this state is for.
mac: Mac,
/// The value this netstack assumes as the device's current MTU.
mtu: u32,
/// The maximum MTU allowed by the hardware.
///
/// `mtu` MUST NEVER be greater than `hw_mtu`.
hw_mtu: u32,
/// Link multicast groups this device has joined.
link_multicast_groups: HashMap<MulticastAddr<Mac>, usize>,
/// IPv4 ARP state.
ipv4_arp: ArpState<EthernetLinkDevice, Ipv4Addr>,
/// (IPv6) NDP state.
ndp: ndp::NdpState<EthernetLinkDevice, I>,
// pending_frames stores a list of serialized frames indexed by their
    // destination IP addresses. The frames contain an entire EthernetFrame
// body and the MTU check is performed before queueing them here.
pending_frames: HashMap<IpAddr, VecDeque<Buf<Vec<u8>>>>,
/// A flag indicating whether the device will accept all ethernet frames that it receives,
/// regardless of the ethernet frame's destination MAC address.
promiscuous_mode: bool,
}
impl<I: Instant> EthernetDeviceState<I> {
/// Adds a pending frame `frame` associated with `local_addr` to the list
/// of pending frames in the current device state.
///
/// If an older frame had to be dropped because it exceeds the maximum
/// allowed number of pending frames, it is returned.
fn add_pending_frame(
&mut self,
local_addr: IpAddr,
frame: Buf<Vec<u8>>,
) -> Option<Buf<Vec<u8>>> {
let buff = self.pending_frames.entry(local_addr).or_insert_with(Default::default);
buff.push_back(frame);
if buff.len() > ETHERNET_MAX_PENDING_FRAMES {
buff.pop_front()
} else {
None
}
}
/// Takes all pending frames associated with address `local_addr`.
fn take_pending_frames(
&mut self,
local_addr: IpAddr,
) -> Option<impl Iterator<Item = Buf<Vec<u8>>>> {
match self.pending_frames.remove(&local_addr) {
Some(buff) => Some(buff.into_iter()),
None => None,
}
}
    /// Is a packet with a destination MAC address, `dst_mac`, destined for this device?
    ///
    /// Returns `true` if this device has `dst_mac` as its assigned MAC address, `dst_mac` is the
    /// broadcast MAC address, or it is one of the multicast MAC addresses the device has joined.
fn should_accept(&self, dst_mac: &Mac) -> bool {
(self.mac == *dst_mac)
|| dst_mac.is_broadcast()
|| (MulticastAddr::new(*dst_mac)
.map(|a| self.link_multicast_groups.contains_key(&a))
.unwrap_or(false))
}
    /// Should a packet with destination MAC address, `dst_mac`, be accepted by this device?
///
/// Returns `true` if this device is in promiscuous mode or the frame is destined for this
/// device.
fn should_deliver(&self, dst_mac: &Mac) -> bool {
self.promiscuous_mode || self.should_accept(dst_mac)
}
}
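// Illustrative sketch (added for this write-up, not part of the original
// source): the bounded pending-frame queue behavior of `add_pending_frame`,
// mirrored on plain types. Queuing an 11th frame for an address evicts the
// oldest one.
#[cfg(test)]
mod pending_frames_sketch {
    use super::*;
    #[test]
    fn oldest_frame_is_dropped_when_full() {
        const MAX: usize = 10; // mirrors ETHERNET_MAX_PENDING_FRAMES
        let mut buff: VecDeque<u8> = VecDeque::new();
        let mut dropped = None;
        for frame in 0..=MAX as u8 {
            buff.push_back(frame);
            if buff.len() > MAX {
                dropped = buff.pop_front();
            }
        }
        // The oldest queued frame (0) was evicted; MAX frames remain queued.
        assert_eq!(dropped, Some(0));
        assert_eq!(buff.len(), MAX);
    }
}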
/// A timer ID for Ethernet devices.
///
/// `D` is the type of device ID that identifies different Ethernet devices.
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)]
pub(crate) enum EthernetTimerId<D> {
Arp(ArpTimerId<EthernetLinkDevice, Ipv4Addr, D>),
Ndp(NdpTimerId<EthernetLinkDevice, D>),
Igmp(IgmpTimerId<EthernetLinkDevice, D>),
Mld(MldReportDelay<EthernetLinkDevice, D>),
}
impl<D> From<ArpTimerId<EthernetLinkDevice, Ipv4Addr, D>> for EthernetTimerId<D> {
fn from(id: ArpTimerId<EthernetLinkDevice, Ipv4Addr, D>) -> EthernetTimerId<D> {
EthernetTimerId::Arp(id)
}
}
impl<D> From<NdpTimerId<EthernetLinkDevice, D>> for EthernetTimerId<D> {
fn from(id: NdpTimerId<EthernetLinkDevice, D>) -> EthernetTimerId<D> {
EthernetTimerId::Ndp(id)
}
}
impl<D> From<IgmpTimerId<EthernetLinkDevice, D>> for EthernetTimerId<D> {
fn from(id: IgmpTimerId<EthernetLinkDevice, D>) -> EthernetTimerId<D> {
EthernetTimerId::Igmp(id)
}
}
impl<D> From<MldReportDelay<EthernetLinkDevice, D>> for EthernetTimerId<D> {
fn from(id: MldReportDelay<EthernetLinkDevice, D>) -> EthernetTimerId<D> {
EthernetTimerId::Mld(id)
}
}
/// Handle an Ethernet timer firing.
pub(super) fn handle_timer<C: EthernetIpDeviceContext>(
ctx: &mut C,
id: EthernetTimerId<C::DeviceId>,
) {
match id {
EthernetTimerId::Arp(id) => arp::handle_timer(ctx, id.into()),
EthernetTimerId::Ndp(id) => <C as NdpHandler<EthernetLinkDevice>>::handle_timer(ctx, id),
EthernetTimerId::Igmp(id) => TimerHandler::handle_timer(ctx, id),
EthernetTimerId::Mld(id) => TimerHandler::handle_timer(ctx, id),
}
}
// If we are provided with an impl of `TimerContext<EthernetTimerId<_>>`, then
// we can in turn provide impls of `TimerContext` for ARP, NDP, IGMP, and MLD
// timers.
impl_timer_context!(
DeviceIdContext<EthernetLinkDevice>,
EthernetTimerId<<C as DeviceIdContext<EthernetLinkDevice>>::DeviceId>,
NdpTimerId<EthernetLinkDevice, <C as DeviceIdContext<EthernetLinkDevice>>::DeviceId>,
EthernetTimerId::Ndp(id),
id
);
impl_timer_context!(
DeviceIdContext<EthernetLinkDevice>,
EthernetTimerId<<C as DeviceIdContext<EthernetLinkDevice>>::DeviceId>,
ArpTimerId<EthernetLinkDevice, Ipv4Addr, <C as DeviceIdContext<EthernetLinkDevice>>::DeviceId>,
EthernetTimerId::Arp(id),
id
);
impl_timer_context!(
DeviceIdContext<EthernetLinkDevice>,
EthernetTimerId<<C as DeviceIdContext<EthernetLinkDevice>>::DeviceId>,
IgmpTimerId<EthernetLinkDevice, <C as DeviceIdContext<EthernetLinkDevice>>::DeviceId>,
EthernetTimerId::Igmp(id),
id
);
impl_timer_context!(
DeviceIdContext<EthernetLinkDevice>,
EthernetTimerId<<C as DeviceIdContext<EthernetLinkDevice>>::DeviceId>,
MldReportDelay<EthernetLinkDevice, <C as DeviceIdContext<EthernetLinkDevice>>::DeviceId>,
EthernetTimerId::Mld(id),
id
);
/// Initialize a device.
///
/// `initialize_device` sets the link-local address for `device_id` and performs DAD on it.
///
/// `device_id` MUST be ready to send packets before `initialize_device` is called.
pub(super) fn initialize_device<C: EthernetIpDeviceContext>(ctx: &mut C, device_id: C::DeviceId) {
//
// Assign a link-local address.
//
let state = ctx.get_state_with(device_id);
// There should be no way to add addresses to a device before it's
// initialized.
assert!(state.ip().ipv6_addr_sub.is_empty());
    // Add the MAC-derived link-local address. Mark it as configured by SLAAC
// and not set to expire.
let addr_sub = state.link().mac.to_ipv6_link_local().into_witness();
add_ip_addr_subnet_inner(ctx, device_id, addr_sub, AddressConfigurationType::Slaac, None)
.expect(
"internal invariant violated: uninitialized device already had IP address assigned",
);
}
/// Send an IP packet in an Ethernet frame.
///
/// `send_ip_frame` accepts a device ID, a local IP address, and a
/// `Serializer`. It computes the routing information, serializes the body in
/// a new Ethernet frame, and sends it.
#[specialize_ip_address]
pub(super) fn send_ip_frame<
B: BufferMut,
C: EthernetIpDeviceContext + FrameContext<B, <C as DeviceIdContext<EthernetLinkDevice>>::DeviceId>,
A: IpAddress,
S: Serializer<Buffer = B>,
>(
ctx: &mut C,
device_id: C::DeviceId,
local_addr: SpecifiedAddr<A>,
body: S,
) -> Result<(), S> {
trace!("ethernet::send_ip_frame: local_addr = {:?}; device = {:?}", local_addr, device_id);
let state = ctx.get_state_mut_with(device_id).link_mut();
let (local_mac, mtu) = (state.mac, state.mtu);
let local_addr = local_addr.get();
let dst_mac = match MulticastAddr::new(local_addr) {
Some(multicast) => Ok(Mac::from(&multicast)),
None => {
#[ipv4addr]
{
arp::lookup(ctx, device_id, local_mac, local_addr).ok_or(IpAddr::V4(local_addr))
}
#[ipv6addr]
{
<C as NdpHandler<_>>::lookup(ctx, device_id, local_addr)
.ok_or(IpAddr::V6(local_addr))
}
}
};
match dst_mac {
Ok(dst_mac) => ctx
.send_frame(
device_id.into(),
body.with_mtu(mtu as usize).encapsulate(EthernetFrameBuilder::new(
local_mac,
dst_mac,
A::Version::ETHER_TYPE,
)),
)
.map_err(|ser| ser.into_inner().into_inner()),
Err(local_addr) => {
let state = ctx.get_state_mut_with(device_id).link_mut();
// The `serialize_vec_outer` call returns an `Either<B,
// Buf<Vec<u8>>`. We could naively call `.as_ref().to_vec()` on it,
// but if it were the `Buf<Vec<u8>>` variant, we'd be unnecessarily
// allocating a new `Vec` when we already have one. Instead, we
// leave the `Buf<Vec<u8>>` variant as it is, and only convert the
// `B` variant by calling `map_a`. That gives us an
// `Either<Buf<Vec<u8>>, Buf<Vec<u8>>`, which we call `into_inner`
// on to get a `Buf<Vec<u8>>`.
let frame = body
.with_mtu(mtu as usize)
.serialize_vec_outer()
.map_err(|ser| ser.1.into_inner())?
.map_a(|buffer| Buf::new(buffer.as_ref().to_vec(), ..))
.into_inner();
let dropped = state.add_pending_frame(local_addr, frame);
if let Some(dropped) = dropped {
// TODO(brunodalbo): Is it ok to silently just let this drop? Or
// should the IP layer be notified in any way?
log_unimplemented!((), "Ethernet dropped frame because ran out of allowable space");
}
Ok(())
}
}
}
/// Receive an Ethernet frame from the network.
pub(super) fn receive_frame<B: BufferMut, C: BufferEthernetIpDeviceContext<B>>(
ctx: &mut C,
device_id: C::DeviceId,
mut buffer: B,
) {
trace!("ethernet::receive_frame: device_id = {:?}", device_id);
// NOTE(joshlf): We do not currently validate that the Ethernet frame
    // satisfies the minimum length requirement. We expect that if this
// requirement is necessary (due to requirements of the physical medium),
// the driver or hardware will have checked it, and that if this requirement
// is not necessary, it is acceptable for us to operate on a smaller
// Ethernet frame. If this becomes insufficient in the future, we may want
// to consider making this behavior configurable (at compile time, at
// runtime on a global basis, or at runtime on a per-device basis).
let frame = if let Ok(frame) =
buffer.parse_with::<_, EthernetFrame<_>>(EthernetFrameLengthCheck::NoCheck)
{
frame
} else {
trace!("ethernet::receive_frame: failed to parse ethernet frame");
// TODO(joshlf): Do something else?
return;
};
let (_, dst) = (frame.src_mac(), frame.dst_mac());
if !ctx.get_state_with(device_id).link().should_deliver(&dst) {
trace!("ethernet::receive_frame: destination mac {:?} not for device {:?}", dst, device_id);
return;
}
let frame_dst = FrameDestination::from(dst);
match frame.ethertype() {
Some(EtherType::Arp) => {
let types = if let Ok(types) = peek_arp_types(buffer.as_ref()) {
types
} else {
// TODO(joshlf): Do something else here?
return;
};
match types {
(ArpHardwareType::Ethernet, ArpNetworkType::Ipv4) => {
arp::receive_arp_packet(ctx, device_id, buffer)
}
}
}
Some(EtherType::Ipv4) => {
ctx.receive_frame(RecvIpFrameMeta::<_, Ipv4>::new(device_id, frame_dst), buffer)
}
Some(EtherType::Ipv6) => {
ctx.receive_frame(RecvIpFrameMeta::<_, Ipv6>::new(device_id, frame_dst), buffer)
}
Some(EtherType::Other(_)) | None => {} // TODO(joshlf)
}
}
/// Set the promiscuous mode flag on `device_id`.
pub(super) fn set_promiscuous_mode<C: EthernetIpDeviceContext>(
ctx: &mut C,
device_id: C::DeviceId,
enabled: bool,
) {
ctx.get_state_mut_with(device_id).link_mut().promiscuous_mode = enabled;
}
/// Get a single IP address for a device.
///
/// Note, tentative IP addresses (addresses which are not yet fully bound to a
/// device) will not be returned by `get_ip_addr`.
///
/// For IPv6, this only returns global (not link-local) addresses.
#[specialize_ip_address]
pub(super) fn get_ip_addr_subnet<C: EthernetIpDeviceContext, A: IpAddress>(
ctx: &C,
device_id: C::DeviceId,
) -> Option<AddrSubnet<A>> {
#[ipv4addr]
return get_assigned_ip_addr_subnets(ctx, device_id).nth(0);
#[ipv6addr]
return get_assigned_ip_addr_subnets(ctx, device_id).find(|a| {
let addr: SpecifiedAddr<Ipv6Addr> = a.addr();
!addr.is_linklocal()
});
}
/// Get the IP address and subnet pairs associated with this device which are in
/// the assigned state.
///
/// Tentative IP addresses (addresses which are not yet fully bound to a device)
/// and deprecated IP addresses (addresses which have been assigned but should
/// no longer be used for new connections) will not be returned by
/// `get_assigned_ip_addr_subnets`.
///
/// Returns an [`Iterator`] of `AddrSubnet<A>`.
///
/// See [`Tentative`] and [`AddrSubnet`] for more information.
#[specialize_ip_address]
pub(super) fn get_assigned_ip_addr_subnets<C: EthernetIpDeviceContext, A: IpAddress>(
ctx: &C,
device_id: C::DeviceId,
) -> FilterMap<
Iter<AddressEntry<A, C::Instant>>,
fn(&AddressEntry<A, C::Instant>) -> Option<AddrSubnet<A>>,
> {
let state = ctx.get_state_with(device_id).ip();
#[ipv4addr]
let addresses = &state.ipv4_addr_sub;
#[ipv6addr]
let addresses = &state.ipv6_addr_sub;
addresses.iter().filter_map(
|a| {
if a.state().is_assigned() {
Some(*a.addr_sub())
} else {
None
}
},
)
}
/// Get the IP address/subnet pairs associated with this device, including
/// tentative and deprecated addresses.
///
/// Returns an [`Iterator`] of `Tentative<AddrSubnet<A>>`.
///
/// See [`Tentative`] and [`AddrSubnet`] for more information.
#[specialize_ip_address]
pub(super) fn get_ip_addr_subnets<C: EthernetIpDeviceContext, A: IpAddress>(
ctx: &C,
device_id: C::DeviceId,
) -> Iter<AddressEntry<A, C::Instant>> {
let state = ctx.get_state_with(device_id).ip();
#[ipv4addr]
let addresses = &state.ipv4_addr_sub;
#[ipv6addr]
let addresses = &state.ipv6_addr_sub;
addresses.iter()
}
/// Get the state of an address on a device.
///
/// Returns `None` if `addr` is not associated with `device_id`.
pub(super) fn get_ip_addr_state<C: EthernetIpDeviceContext, A: IpAddress>(
ctx: &C,
device_id: C::DeviceId,
addr: &SpecifiedAddr<A>,
) -> Option<AddressState> {
get_ip_addr_state_inner(ctx, device_id, &addr.get(), None)
}
/// Get the state of an address on a device.
///
/// If `configuration_type` is provided, then only the state of an address of that
/// configuration type will be returned.
///
/// Returns `None` if `addr` is not associated with `device_id`.
// TODO(ghanan): Use `SpecifiedAddr` for `addr`.
fn get_ip_addr_state_inner<C: EthernetIpDeviceContext, A: IpAddress>(
ctx: &C,
device_id: C::DeviceId,
addr: &A,
configuration_type: Option<AddressConfigurationType>,
) -> Option<AddressState> {
fn inner<A: IpAddress, I: Instant>(
addr_sub: &Vec<AddressEntry<A, I>>,
addr: A,
configuration_type: Option<AddressConfigurationType>,
) -> Option<AddressState> {
addr_sub.iter().find_map(|a| {
if a.addr_sub().addr().get() == addr
&& configuration_type.map_or(true, |x| x == a.configuration_type())
{
Some(a.state())
} else {
None
}
})
}
let state = ctx.get_state_with(device_id).ip();
addr.clone().with(
|addr| inner(&state.ipv4_addr_sub, addr, configuration_type),
|addr| inner(&state.ipv6_addr_sub, addr, configuration_type),
)
}
/// Adds an IP address and associated subnet to this device.
///
/// For IPv6, this function also joins the solicited-node multicast group and
/// begins performing Duplicate Address Detection (DAD).
pub(super) fn add_ip_addr_subnet<C: EthernetIpDeviceContext, A: IpAddress>(
ctx: &mut C,
device_id: C::DeviceId,
addr_sub: AddrSubnet<A>,
) -> Result<(), AddressError> {
// Add the IP address and mark it as a manually added address.
add_ip_addr_subnet_inner(ctx, device_id, addr_sub, AddressConfigurationType::Manual, None)
}
/// Adds an IP address and associated subnet to this device.
///
/// `configuration_type` is the way this address is being configured. See
/// [`AddressConfigurationType`] for more details.
///
/// For IPv6, this function also joins the solicited-node multicast group and
/// begins performing Duplicate Address Detection (DAD).
#[specialize_ip_address]
fn add_ip_addr_subnet_inner<C: EthernetIpDeviceContext, A: IpAddress>(
ctx: &mut C,
device_id: C::DeviceId,
addr_sub: AddrSubnet<A>,
configuration_type: AddressConfigurationType,
valid_until: Option<C::Instant>,
) -> Result<(), AddressError> {
let addr = addr_sub.addr().get();
if get_ip_addr_state_inner(ctx, device_id, &addr, None).is_some() {
return Err(AddressError::AlreadyExists);
}
let state = ctx.get_state_mut_with(device_id).ip_mut();
#[ipv4addr]
state.ipv4_addr_sub.push(AddressEntry::new(
addr_sub,
AddressState::Assigned,
configuration_type,
valid_until,
));
#[ipv6addr]
{
// First, join the solicited-node multicast group.
join_ip_multicast(ctx, device_id, addr.to_solicited_node_address());
let state = ctx.get_state_mut_with(device_id).ip_mut();
state.ipv6_addr_sub.push(AddressEntry::new(
addr_sub,
AddressState::Tentative,
configuration_type,
valid_until,
));
// Do Duplicate Address Detection on `addr`.
ctx.start_duplicate_address_detection(device_id, addr);
}
Ok(())
}
/// Removes an IP address and associated subnet from this device.
///
/// # Panics
///
/// Panics if `addr` is a link-local address.
// TODO(ghanan): Use a witness type to guarantee non-link-local-ness for `addr`.
pub(super) fn del_ip_addr<C: EthernetIpDeviceContext, A: IpAddress>(
ctx: &mut C,
device_id: C::DeviceId,
addr: &SpecifiedAddr<A>,
) -> Result<(), AddressError> {
del_ip_addr_inner(ctx, device_id, &addr.get(), None)
}
/// Removes an IP address and associated subnet from this device.
///
/// If `configuration_type` is provided, then only an address of that
/// configuration type will be removed.
fn del_ip_addr_inner<C: EthernetIpDeviceContext, A: IpAddress>(
ctx: &mut C,
device_id: C::DeviceId,
addr: &A,
configuration_type: Option<AddressConfigurationType>,
) -> Result<(), AddressError> {
// NOTE: We use two separate calls here rather than a single call to `.with`
    // because both closures mutably borrow `ctx`, and so they can't exist
// at the same time, which would be required in order to pass them both to
// `.with`.
addr.clone().with_v4(
|addr| {
let state = ctx.get_state_mut_with(device_id).ip_mut();
let original_size = state.ipv4_addr_sub.len();
if let Some(configuration_type) = configuration_type {
state.ipv4_addr_sub.retain(|x| {
(x.addr_sub().addr().get() != addr)
&& (x.configuration_type() == configuration_type)
});
} else {
state.ipv4_addr_sub.retain(|x| x.addr_sub().addr().get() != addr);
}
let new_size = state.ipv4_addr_sub.len();
if new_size == original_size {
return Err(AddressError::NotFound);
}
assert_eq!(original_size - new_size, 1);
Ok(())
},
Ok(()),
)?;
addr.clone().with_v6(
|addr| {
if let Some(state) = get_ip_addr_state_inner(ctx, device_id, &addr, configuration_type)
{
if state.is_tentative() {
// Cancel current duplicate address detection for `addr` as we are
// removing this IP.
//
// `cancel_duplicate_address_detection` may panic if we are not
// performing DAD on `addr`. However, we will only reach here
// if `addr` is marked as tentative. If `addr` is marked as
// tentative, then we know that we are performing DAD on it.
// Given this, we know `cancel_duplicate_address_detection` will
// not panic.
ctx.cancel_duplicate_address_detection(device_id, addr);
}
} else {
return Err(AddressError::NotFound);
}
let state = ctx.get_state_mut_with(device_id).ip_mut();
let original_size = state.ipv6_addr_sub.len();
state.ipv6_addr_sub.retain(|x| x.addr_sub().addr().get() != addr);
let new_size = state.ipv6_addr_sub.len();
// Since we just checked earlier if we had the address, we must have removed it
// now.
assert_eq!(original_size - new_size, 1);
            // Leave the solicited-node multicast group.
leave_ip_multicast(ctx, device_id, addr.to_solicited_node_address());
Ok(())
},
Ok(()),
)
}
/// Get a (non-tentative) IPv6 link-local address associated with this device.
///
/// No guarantee is made that two calls to this function will return the same
/// link-local address if multiple are available.
///
/// Returns `None` if `device_id` does not have a non-tentative link-local
/// address.
pub(super) fn get_ipv6_link_local_addr<C: EthernetIpDeviceContext>(
ctx: &C,
device_id: C::DeviceId,
) -> Option<LinkLocalAddr<Ipv6Addr>> {
ctx.get_state_with(device_id).ip().ipv6_addr_sub.iter().find_map(|a| {
if a.state().is_assigned() {
LinkLocalAddr::new(a.addr_sub().addr().get())
} else {
None
}
})
}
/// Add `device_id` to a link multicast group `multicast_addr`.
///
/// Calling `join_link_multicast` with the same `device_id` and `multicast_addr` is completely safe.
/// A counter will be kept for the number of times `join_link_multicast` has been called with the
/// same `device_id` and `multicast_addr` pair. To completely leave a multicast group,
/// [`leave_link_multicast`] must be called the same number of times `join_link_multicast` has been
/// called for the same `device_id` and `multicast_addr` pair. The first time `join_link_multicast`
/// is called for a new `device` and `multicast_addr` pair, the device will actually join the
/// multicast group.
///
/// `join_link_multicast` is different from [`join_ip_multicast`] as `join_link_multicast` joins an
/// L2 multicast group, whereas `join_ip_multicast` joins an L3 multicast group.
pub(super) fn join_link_multicast<C: EthernetIpDeviceContext>(
ctx: &mut C,
device_id: C::DeviceId,
multicast_addr: MulticastAddr<Mac>,
) {
let device_state = ctx.get_state_mut_with(device_id).link_mut();
let groups = &mut device_state.link_multicast_groups;
let counter = groups.entry(multicast_addr).or_insert(0);
*counter += 1;
if *counter == 1 {
trace!("ethernet::join_link_multicast: joining link multicast {:?}", multicast_addr,);
} else {
trace!(
"ethernet::join_link_multicast: already joinined link multicast {:?}, counter = {}",
multicast_addr,
*counter,
);
}
}
/// Remove `device_id` from a link multicast group `multicast_addr`.
///
/// `leave_link_multicast` will attempt to remove `device_id` from the multicast group
/// `multicast_addr`. `device_id` may have "joined" the same multicast address multiple times, so
/// `device_id` will only leave the multicast group once `leave_link_multicast` has been called for
/// each corresponding [`join_link_multicast`]. That is, if `join_link_multicast` gets called 3
/// times and `leave_link_multicast` gets called two times (after all 3 `join_link_multicast`
/// calls), `device_id` will still be in the multicast group until the next (final) call to
/// `leave_link_multicast`.
///
/// `leave_link_multicast` is different from [`leave_ip_multicast`] as `leave_link_multicast` leaves
/// an L2 multicast group, whereas `leave_ip_multicast` leaves an L3 multicast group.
///
/// # Panics
///
/// If `device_id` is not in the multicast group `multicast_addr`.
fn leave_link_multicast<C: EthernetIpDeviceContext>(
ctx: &mut C,
device_id: C::DeviceId,
multicast_addr: MulticastAddr<Mac>,
) {
let device_state = ctx.get_state_mut_with(device_id).link_mut();
let groups = &mut device_state.link_multicast_groups;
// Will panic if `device_id` has not yet joined the multicast address.
let counter = groups.get_mut(&multicast_addr).unwrap();
if *counter == 1 {
trace!("ethernet::leave_link_multicast: leaving link multicast {:?}", multicast_addr,);
groups.remove(&multicast_addr);
} else {
*counter -= 1;
trace!(
"ethernet::leave_link_multicast: not leaving link multicast {:?} as there are still listeners for it, counter = {}",
multicast_addr,
*counter,
);
}
}
/// Add `device_id` to a multicast group `multicast_addr`.
///
/// Calling `join_ip_multicast` with the same `device_id` and `multicast_addr` is completely safe.
/// A counter will be kept for the number of times `join_ip_multicast` has been called with the
/// same `device_id` and `multicast_addr` pair. To completely leave a multicast group,
/// [`leave_ip_multicast`] must be called the same number of times `join_ip_multicast` has been
/// called for the same `device_id` and `multicast_addr` pair. The first time `join_ip_multicast` is
/// called for a new `device` and `multicast_addr` pair, the device will actually join the multicast
/// group.
///
/// `join_ip_multicast` is different from [`join_link_multicast`] as `join_ip_multicast` joins an
/// L3 multicast group, whereas `join_link_multicast` joins an L2 multicast group.
#[specialize_ip_address]
pub(super) fn join_ip_multicast<C: EthernetIpDeviceContext, A: IpAddress>(
ctx: &mut C,
device_id: C::DeviceId,
multicast_addr: MulticastAddr<A>,
) {
#[ipv4addr]
let res = ctx.igmp_join_group(device_id, multicast_addr);
#[ipv6addr]
let res = ctx.mld_join_group(device_id, multicast_addr);
match res {
GroupJoinResult::Joined(()) => {
let mac = MulticastAddr::from(&multicast_addr);
trace!(
"ethernet::join_ip_multicast: joining IP multicast {:?} and MAC multicast {:?}",
multicast_addr,
mac
);
join_link_multicast(ctx, device_id, mac);
}
GroupJoinResult::AlreadyMember => trace!(
"ethernet::join_ip_multicast: already joinined IP multicast {:?}",
multicast_addr,
),
}
}
/// Remove `device_id` from a multicast group `multicast_addr`.
///
/// `leave_ip_multicast` will attempt to remove `device_id` from a multicast group `multicast_addr`.
/// `device_id` may have "joined" the same multicast address multiple times, so `device_id` will
/// only leave the multicast group once `leave_ip_multicast` has been called for each corresponding
/// [`join_ip_multicast`]. That is, if `join_ip_multicast` gets called 3 times and
/// `leave_ip_multicast` gets called two times (after all 3 `join_ip_multicast` calls), `device_id`
/// will still be in the multicast group until the next (final) call to `leave_ip_multicast`.
///
/// `leave_ip_multicast` is different from [`leave_link_multicast`] as `leave_ip_multicast` leaves
/// an L3 multicast group, whereas `leave_link_multicast` leaves an L2 multicast group.
///
/// # Panics
///
/// If `device_id` is not currently in the multicast group `multicast_addr`.
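///
/// # Example
///
/// An illustrative sketch (ignored doctest) of the reference counting and the panic
/// condition; `ctx` and `device_id` are assumed to come from the surrounding device layer:
///
/// ```ignore
/// join_ip_multicast(ctx, device_id, multicast_addr);
/// leave_ip_multicast(ctx, device_id, multicast_addr); // ok: leaves the group
/// leave_ip_multicast(ctx, device_id, multicast_addr); // panics: not a member
/// ```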
#[specialize_ip_address]
pub(super) fn leave_ip_multicast<C: EthernetIpDeviceContext, A: IpAddress>(
ctx: &mut C,
device_id: C::DeviceId,
multicast_addr: MulticastAddr<A>,
) {
#[ipv4addr]
let res = ctx.igmp_leave_group(device_id, multicast_addr);
#[ipv6addr]
let res = ctx.mld_leave_group(device_id, multicast_addr);
match res {
GroupLeaveResult::Left(()) => {
let mac = MulticastAddr::from(&multicast_addr);
trace!(
"ethernet::leave_ip_multicast: leaving IP multicast {} and MAC multicast {}",
multicast_addr,
mac
);
leave_link_multicast(ctx, device_id, mac);
}
GroupLeaveResult::StillMember => trace!(
"ethernet::leave_ip_multicast: not leaving IP multicast {} as there are still listeners for it",
multicast_addr,
),
GroupLeaveResult::NotMember => panic!(
"attempted to leave IP multicast group we were not a member of: {}",
multicast_addr,
),
}
}
/// Is `device` in the IP multicast group `multicast_addr`?
#[specialize_ip_address]
pub(super) fn is_in_ip_multicast<C: EthernetIpDeviceContext, A: IpAddress>(
ctx: &C,
device_id: C::DeviceId,
multicast_addr: MulticastAddr<A>,
) -> bool {
#[ipv4addr]
return ctx.get_state_with(device_id).ip().ipv4_multicast_groups.contains(&multicast_addr);
#[ipv6addr]
return ctx.get_state_with(device_id).ip().ipv6_multicast_groups.contains(&multicast_addr);
}
/// Get the MTU associated with this device.
pub(super) fn get_mtu<C: EthernetIpDeviceContext>(ctx: &C, device_id: C::DeviceId) -> u32 {
ctx.get_state_with(device_id).link().mtu
}
/// Get the hop limit for new IPv6 packets that will be sent out from `device_id`.
pub(super) fn get_ipv6_hop_limit<C: EthernetIpDeviceContext>(
ctx: &C,
device_id: C::DeviceId,
) -> NonZeroU8 {
ctx.get_state_with(device_id).ip().ipv6_hop_limit
}
/// Is IP packet routing enabled on `device_id`?
///
/// Note that `true` does not necessarily mean that `device` is currently routing IP packets. It
/// only means that `device` is allowed to route packets. To actually route packets, the netstack
/// must also be configured to forward IP packets that are not destined for this node.
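///
/// An illustrative sketch (ignored doctest) using the public device API, which is assumed
/// to wrap this module:
///
/// ```ignore
/// crate::device::set_routing_enabled::<_, Ipv4>(&mut ctx, device, true);
/// assert!(is_routing_enabled::<_, Ipv4>(&ctx, device));
/// // The flag is tracked per IP version.
/// assert!(!is_routing_enabled::<_, Ipv6>(&ctx, device));
/// ```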
pub(super) fn is_routing_enabled<C: EthernetIpDeviceContext, I: Ip>(
ctx: &C,
device_id: C::DeviceId,
) -> bool {
let state = &ctx.get_state_with(device_id).ip();
match I::VERSION {
IpVersion::V4 => state.route_ipv4,
IpVersion::V6 => state.route_ipv6,
}
}
/// Sets the IP packet routing flag on `device_id`.
///
/// This method MUST NOT be called directly. It MUST only be called by
/// [`crate::device::set_routing_enabled`].
///
/// See [`crate::device::set_routing_enabled`] for more information.
pub(super) fn set_routing_enabled_inner<C: EthernetIpDeviceContext, I: Ip>(
ctx: &mut C,
device_id: C::DeviceId,
enabled: bool,
) {
let state = ctx.get_state_mut_with(device_id).ip_mut();
match I::VERSION {
IpVersion::V4 => state.route_ipv4 = enabled,
IpVersion::V6 => state.route_ipv6 = enabled,
}
}
/// Insert a static entry into this device's ARP table.
///
/// This will cause any conflicting dynamic entry to be removed, and
/// any future conflicting gratuitous ARPs to be ignored.
// TODO(rheacock): remove `cfg(test)` when this is used. Will probably be
// called by a pub fn in the device mod.
#[cfg(test)]
pub(super) fn insert_static_arp_table_entry<C: EthernetIpDeviceContext>(
ctx: &mut C,
device_id: C::DeviceId,
addr: Ipv4Addr,
mac: Mac,
) {
arp::insert_static_neighbor(ctx, device_id, addr, mac)
}
/// Insert an entry into this device's NDP table.
///
/// This method is only called in tests to force-set a neighbor's
/// link address so that lookups succeed immediately, without doing
/// address resolution.
// TODO(rheacock): remove when this is called from non-test code
#[cfg(test)]
pub(super) fn insert_ndp_table_entry<C: EthernetIpDeviceContext>(
ctx: &mut C,
device_id: C::DeviceId,
addr: Ipv6Addr,
mac: Mac,
) {
<C as NdpHandler<_>>::insert_static_neighbor(ctx, device_id, addr, mac)
}
/// Deinitializes and cleans up state for ethernet devices
///
/// After this function is called, the ethernet device should not be used and
/// nothing else should be done with the state.
pub(super) fn deinitialize<C: EthernetIpDeviceContext>(ctx: &mut C, device_id: C::DeviceId) {
arp::deinitialize(ctx, device_id);
<C as NdpHandler<_>>::deinitialize(ctx, device_id);
}
impl<C: EthernetIpDeviceContext> StateContext<ArpState<EthernetLinkDevice, Ipv4Addr>, C::DeviceId>
for C
{
fn get_state_with(&self, id: C::DeviceId) -> &ArpState<EthernetLinkDevice, Ipv4Addr> {
&self.get_state_with(id).link().ipv4_arp
}
fn get_state_mut_with(
&mut self,
id: C::DeviceId,
) -> &mut ArpState<EthernetLinkDevice, Ipv4Addr> {
&mut self.get_state_mut_with(id).link_mut().ipv4_arp
}
}
impl<
B: BufferMut,
C: EthernetIpDeviceContext
+ FrameContext<B, <C as DeviceIdContext<EthernetLinkDevice>>::DeviceId>,
> FrameContext<B, ArpFrameMetadata<EthernetLinkDevice, C::DeviceId>> for C
{
fn send_frame<S: Serializer<Buffer = B>>(
&mut self,
meta: ArpFrameMetadata<EthernetLinkDevice, C::DeviceId>,
body: S,
) -> Result<(), S> {
let src = self.get_state_with(meta.device_id).link().mac;
self.send_frame(
meta.device_id,
body.encapsulate(EthernetFrameBuilder::new(src, meta.dst_addr, EtherType::Arp)),
)
.map_err(Nested::into_inner)
}
}
impl<C: EthernetIpDeviceContext> ArpDeviceIdContext<EthernetLinkDevice> for C {
type DeviceId = <C as DeviceIdContext<EthernetLinkDevice>>::DeviceId;
}
impl<C: EthernetIpDeviceContext> ArpContext<EthernetLinkDevice, Ipv4Addr> for C {
fn get_protocol_addr(
&self,
device_id: <C as ArpDeviceIdContext<EthernetLinkDevice>>::DeviceId,
) -> Option<Ipv4Addr> {
get_ip_addr_subnet::<_, Ipv4Addr>(self, device_id.into()).map(|a| a.addr().get())
}
fn get_hardware_addr(
&self,
device_id: <C as ArpDeviceIdContext<EthernetLinkDevice>>::DeviceId,
) -> Mac {
self.get_state_with(device_id.into()).link().mac
}
fn address_resolved(
&mut self,
device_id: <C as ArpDeviceIdContext<EthernetLinkDevice>>::DeviceId,
proto_addr: Ipv4Addr,
hw_addr: Mac,
) {
mac_resolved(self, device_id.into(), IpAddr::V4(proto_addr), hw_addr);
}
fn address_resolution_failed(
&mut self,
device_id: <C as ArpDeviceIdContext<EthernetLinkDevice>>::DeviceId,
proto_addr: Ipv4Addr,
) {
mac_resolution_failed(self, device_id.into(), IpAddr::V4(proto_addr));
}
fn address_resolution_expired(
&mut self,
_device_id: <C as ArpDeviceIdContext<EthernetLinkDevice>>::DeviceId,
_proto_addr: Ipv4Addr,
) {
log_unimplemented!((), "ArpContext::address_resolution_expired");
}
}
impl<C: EthernetIpDeviceContext> StateContext<NdpState<EthernetLinkDevice, C::Instant>, C::DeviceId>
for C
{
fn get_state_with(&self, id: C::DeviceId) -> &NdpState<EthernetLinkDevice, C::Instant> {
&self.get_state_with(id).link().ndp
}
fn get_state_mut_with(
&mut self,
id: C::DeviceId,
) -> &mut NdpState<EthernetLinkDevice, C::Instant> {
&mut self.get_state_mut_with(id).link_mut().ndp
}
}
impl<C: EthernetIpDeviceContext> NdpContext<EthernetLinkDevice> for C {
fn get_link_layer_addr(&self, device_id: C::DeviceId) -> Mac {
self.get_state_with(device_id).link().mac
}
fn get_interface_identifier(&self, device_id: C::DeviceId) -> [u8; 8] {
self.get_state_with(device_id).link().mac.to_eui64()
}
fn get_link_local_addr(
&self,
device_id: C::DeviceId,
) -> Option<Tentative<LinkLocalAddr<Ipv6Addr>>> {
self.get_state_with(device_id).ip().ipv6_addr_sub.iter().find_map(|a| {
let addr = LinkLocalAddr::new(a.addr_sub().addr().get())?;
Some(if a.state().is_tentative() {
Tentative::new_tentative(addr)
} else {
Tentative::new_permanent(addr)
})
})
}
fn get_ipv6_addr(&self, device_id: C::DeviceId) -> Option<Ipv6Addr> {
        // Return a non-tentative global address, or the link-local address if no non-tentative
        // global addresses are associated with `device_id`.
match get_ip_addr_subnet::<_, Ipv6Addr>(self, device_id) {
Some(addr_sub) => Some(addr_sub.addr().get()),
None => Self::get_link_local_addr(self, device_id)
.map(|a| a.try_into_permanent())
.map(|a| a.map(Witness::into_addr))
.unwrap_or(None),
}
}
type AddrEntriesIter = IntoIter<AddressEntry<Ipv6Addr, C::Instant>>;
fn get_ipv6_addr_entries(
&self,
device_id: C::DeviceId,
) -> IntoIter<AddressEntry<Ipv6Addr, C::Instant>> {
// TODO(joshlf): The fact that we clone the entire list of entries here
// is just so that we can avoid writing out a large, ugly function
// signature for `get_ipv6_addr_entries`. We would like to have the
// return value be `impl Iterator`, but impl trait is not yet supported
// on trait methods. Instead, `NdpContext` has the associated
// `AddrEntriesIter` type. However, due to lifetime issues, this
        // precludes us from returning an iterator whose lifetime is tied to the
// lifetime of `self` passed to this method, which in turn means that
// our only option is to return entries by value, which requires
// cloning. Since this isn't in the hot path, we accept the cost of
// cloning the vector, but it would be great if we could solve this in a
// better way.
let mut addrs = self.get_state_with(device_id).ip().ipv6_addr_sub.clone();
addrs.retain(|a| {
let addr: SpecifiedAddr<Ipv6Addr> = a.addr_sub().addr();
!addr.is_linklocal()
});
addrs.into_iter()
}
fn ipv6_addr_state(&self, device_id: C::DeviceId, address: &Ipv6Addr) -> Option<AddressState> {
let address = SpecifiedAddr::new(*address)?;
get_ip_addr_state::<_, Ipv6Addr>(self, device_id, &address)
}
fn address_resolved(&mut self, device_id: C::DeviceId, address: &Ipv6Addr, link_address: Mac) {
mac_resolved(self, device_id, IpAddr::V6(*address), link_address);
}
fn address_resolution_failed(&mut self, device_id: C::DeviceId, address: &Ipv6Addr) {
mac_resolution_failed(self, device_id, IpAddr::V6(*address));
}
fn duplicate_address_detected(&mut self, device_id: C::DeviceId, addr: Ipv6Addr) {
let state = self.get_state_mut_with(device_id).ip_mut();
let original_size = state.ipv6_addr_sub.len();
state.ipv6_addr_sub.retain(|x| x.addr_sub().addr().get() != addr);
assert_eq!(
state.ipv6_addr_sub.len(),
original_size - 1,
"duplicate address detected, but not in our list of addresses"
);
        // Leave the solicited-node multicast group.
leave_ip_multicast(self, device_id, addr.to_solicited_node_address());
// TODO: we need to pick a different address depending on what flow we are using.
}
fn unique_address_determined(&mut self, device_id: C::DeviceId, addr: Ipv6Addr) {
trace!(
"ethernet::unique_address_determined: device_id = {:?}; addr = {:?}",
device_id,
addr
);
let state = self.get_state_mut_with(device_id).ip_mut();
if let Some(entry) =
state.ipv6_addr_sub.iter_mut().find(|a| a.addr_sub().addr().get() == addr)
{
entry.mark_permanent();
} else {
panic!("Attempted to resolve an unknown tentative address");
}
}
fn set_mtu(&mut self, device_id: C::DeviceId, mut mtu: u32) {
// TODO(ghanan): Should this new MTU be updated only from the netstack's perspective or
// be exposed to the device hardware?
// `mtu` must not be less than the minimum IPv6 MTU.
assert!(mtu >= Ipv6::MINIMUM_LINK_MTU.into());
let dev_state = self.get_state_mut_with(device_id).link_mut();
// If `mtu` is greater than what the device supports, set `mtu` to the maximum MTU the
// device supports.
if mtu > dev_state.hw_mtu {
trace!("ethernet::ndp_device::set_mtu: MTU of {:?} is greater than the device {:?}'s max MTU of {:?}, using device's max MTU instead", mtu, device_id, dev_state.hw_mtu);
mtu = dev_state.hw_mtu;
}
trace!("ethernet::ndp_device::set_mtu: setting link MTU to {:?}", mtu);
dev_state.mtu = mtu;
}
fn set_hop_limit(&mut self, device_id: Self::DeviceId, hop_limit: NonZeroU8) {
self.get_state_mut_with(device_id).ip_mut().ipv6_hop_limit = hop_limit;
}
fn add_slaac_addr_sub(
&mut self,
device_id: Self::DeviceId,
addr_sub: AddrSubnet<Ipv6Addr>,
valid_until: Self::Instant,
) -> Result<(), AddressError> {
trace!(
"ethernet::add_slaac_addr_sub: adding address {:?} on device {:?}",
addr_sub,
device_id
);
add_ip_addr_subnet_inner(
self,
device_id,
addr_sub,
AddressConfigurationType::Slaac,
Some(valid_until),
)
}
fn deprecate_slaac_addr(&mut self, device_id: Self::DeviceId, addr: &Ipv6Addr) {
trace!(
"ethernet::deprecate_slaac_addr: deprecating address {:?} on device {:?}",
addr,
device_id
);
let state = self.get_state_mut_with(device_id).ip_mut();
if let Some(entry) = state.ipv6_addr_sub.iter_mut().find(|a| {
(a.addr_sub().addr().get() == *addr)
&& a.configuration_type() == AddressConfigurationType::Slaac
}) {
match entry.state {
AddressState::Assigned => {
entry.state = AddressState::Deprecated;
}
AddressState::Tentative => {
trace!("ethernet::deprecate_slaac_addr: invalidating the deprecated tentative address {:?} on device {:?}", addr, device_id);
// If `addr` is currently tentative on `device_id`, the address should simply
// be invalidated as new connections should not use a deprecated address,
// and we should have no existing connections using a tentative address.
// We must have had an invalidation timeout if we just attempted to deprecate.
assert!(self
.cancel_timer(
ndp::NdpTimerId::new_invalidate_slaac_address(device_id, *addr).into()
)
.is_some());
Self::invalidate_slaac_addr(self, device_id, addr);
}
AddressState::Deprecated => unreachable!(
"We should never attempt to deprecate an already deprecated address"
),
}
} else {
panic!("Address is not configured via SLAAC on this device");
}
}
fn invalidate_slaac_addr(&mut self, device_id: Self::DeviceId, addr: &Ipv6Addr) {
trace!(
"ethernet::invalidate_slaac_addr: invalidating address {:?} on device {:?}",
addr,
device_id
);
// `unwrap` will panic if `addr` is not an address configured via SLAAC on `device_id`.
del_ip_addr_inner(self, device_id, addr, Some(AddressConfigurationType::Slaac)).unwrap();
}
fn update_slaac_addr_valid_until(
&mut self,
device_id: Self::DeviceId,
addr: &Ipv6Addr,
valid_until: Self::Instant,
) {
trace!(
"ethernet::update_slaac_addr_valid_until: updating address {:?}'s valid until instant to {:?} on device {:?}",
addr,
valid_until,
device_id
);
let state = self.get_state_mut_with(device_id).ip_mut();
if let Some(entry) = state.ipv6_addr_sub.iter_mut().find(|a| {
(a.addr_sub().addr().get() == *addr)
&& a.configuration_type() == AddressConfigurationType::Slaac
}) {
entry.valid_until = Some(valid_until);
} else {
panic!("Address is not configured via SLAAC on this device");
}
}
fn is_router(&self, device_id: Self::DeviceId) -> bool {
self.is_router_device::<Ipv6>(device_id)
}
fn send_ipv6_frame<S: Serializer<Buffer = EmptyBuf>>(
&mut self,
device_id: Self::DeviceId,
next_hop: Ipv6Addr,
body: S,
) -> Result<(), S> {
// `device_id` must not be uninitialized.
assert!(self.is_device_usable(device_id));
// TODO(joshlf): Wire `SpecifiedAddr` through the `ndp` module.
send_ip_frame(self, device_id, SpecifiedAddr::new(next_hop).unwrap(), body)
}
}
/// An implementation of the [`LinkDevice`] trait for Ethernet devices.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub(crate) struct EthernetLinkDevice;
impl LinkDevice for EthernetLinkDevice {
type Address = Mac;
}
/// Sends out any pending frames that are waiting for link layer address
/// resolution.
///
/// `mac_resolved` is the common logic used when a link layer address is
/// resolved either by ARP or NDP.
fn mac_resolved<C: EthernetIpDeviceContext>(
ctx: &mut C,
device_id: C::DeviceId,
address: IpAddr,
dst_mac: Mac,
) {
let state = ctx.get_state_mut_with(device_id).link_mut();
let src_mac = state.mac;
let ether_type = match &address {
IpAddr::V4(_) => EtherType::Ipv4,
IpAddr::V6(_) => EtherType::Ipv6,
};
if let Some(pending) = state.take_pending_frames(address) {
for frame in pending {
// NOTE(brunodalbo): We already performed MTU checking when we
// saved the buffer waiting for address resolution. It should
// be noted that the MTU check back then didn't account for
// ethernet frame padding required by EthernetFrameBuilder,
// but that's fine (as it stands right now) because the MTU
                // is guaranteed to be larger than the Ethernet minimum frame
// body size.
let res = ctx.send_frame(
device_id.into(),
frame.encapsulate(EthernetFrameBuilder::new(src_mac, dst_mac, ether_type)),
);
            if res.is_err() {
// TODO(joshlf): Do we want to handle this differently?
debug!("Failed to send pending frame; MTU changed since frame was queued");
}
}
}
}
/// Clears out any pending frames that are waiting for link layer address
/// resolution.
///
/// `mac_resolution_failed` is the common logic used when a link layer address
/// fails to resolve either by ARP or NDP.
fn mac_resolution_failed<C: EthernetIpDeviceContext>(
ctx: &mut C,
device_id: C::DeviceId,
address: IpAddr,
) {
// TODO(brunodalbo) what do we do here in regards to the pending frames?
// NDP's RFC explicitly states unreachable ICMP messages must be generated:
// "If no Neighbor Advertisement is received after MAX_MULTICAST_SOLICIT
// solicitations, address resolution has failed. The sender MUST return
// ICMP destination unreachable indications with code 3
// (Address Unreachable) for each packet queued awaiting address
// resolution."
    // For ARP, the RFC does not state this as explicitly, but it would make
    // sense to do the same thing.
let state = ctx.get_state_mut_with(device_id).link_mut();
    if state.take_pending_frames(address).is_some() {
log_unimplemented!((), "ethernet mac resolution failed not implemented");
}
}
#[cfg(test)]
mod tests {
use packet::Buf;
use packet_formats::icmp::{IcmpDestUnreachable, IcmpIpExt};
use packet_formats::ip::{IpExt, IpPacketBuilder, IpProto};
use packet_formats::testdata::{dns_request_v4, dns_request_v6};
use packet_formats::testutil::{
parse_icmp_packet_in_ip_packet_in_ethernet_frame, parse_ip_packet_in_ethernet_frame,
};
use rand::Rng;
use rand_xorshift::XorShiftRng;
use specialize_ip_macro::{ip_test, specialize_ip};
use super::*;
use crate::context::testutil::DummyInstant;
use crate::device::{
arp::ArpHandler, is_routing_enabled, set_routing_enabled, DeviceId, EthernetDeviceId,
IpLinkDeviceState,
};
use crate::ip::{
dispatch_receive_ip_packet_name, receive_ip_packet, DummyDeviceId, IpDeviceIdContext,
};
use crate::testutil::{
add_arp_or_ndp_table_entry, get_counter_val, new_rng, DummyEventDispatcher,
DummyEventDispatcherBuilder, FakeCryptoRng, TestIpExt, DUMMY_CONFIG_V4,
};
use crate::StackStateBuilder;
struct DummyEthernetContext {
state: IpLinkDeviceState<DummyInstant, EthernetDeviceState<DummyInstant>>,
}
impl DummyEthernetContext {
fn new(mac: Mac, mtu: u32) -> DummyEthernetContext {
DummyEthernetContext {
state: IpLinkDeviceState::new(EthernetDeviceStateBuilder::new(mac, mtu).build()),
}
}
}
type DummyContext = crate::context::testutil::DummyContext<
DummyEthernetContext,
EthernetTimerId<DummyDeviceId>,
DummyDeviceId,
>;
impl
DualStateContext<
IpLinkDeviceState<DummyInstant, EthernetDeviceState<DummyInstant>>,
FakeCryptoRng<XorShiftRng>,
DummyDeviceId,
> for DummyContext
{
fn get_states_with(
&self,
_id0: DummyDeviceId,
_id1: (),
) -> (
&IpLinkDeviceState<DummyInstant, EthernetDeviceState<DummyInstant>>,
&FakeCryptoRng<XorShiftRng>,
) {
let (state, rng) = self.get_states_with((), ());
(&state.state, rng)
}
fn get_states_mut_with(
&mut self,
_id0: DummyDeviceId,
_id1: (),
) -> (
&mut IpLinkDeviceState<DummyInstant, EthernetDeviceState<DummyInstant>>,
&mut FakeCryptoRng<XorShiftRng>,
) {
let (state, rng) = self.get_states_mut_with((), ());
(&mut state.state, rng)
}
}
impl DeviceIdContext<EthernetLinkDevice> for DummyContext {
type DeviceId = DummyDeviceId;
}
impl IpDeviceIdContext for DummyContext {
type DeviceId = DummyDeviceId;
}
impl
IpDeviceContext<
EthernetLinkDevice,
EthernetTimerId<DummyDeviceId>,
EthernetDeviceState<DummyInstant>,
> for DummyContext
{
fn is_router_device<I: Ip>(&self, _device: DummyDeviceId) -> bool {
unimplemented!()
}
fn is_device_usable(&self, _device: DummyDeviceId) -> bool {
unimplemented!()
}
}
#[test]
fn test_mtu() {
// Test that we send an Ethernet frame whose size is less than the MTU,
// and that we don't send an Ethernet frame whose size is greater than
// the MTU.
fn test(size: usize, expect_frames_sent: usize) {
let mut ctx = DummyContext::with_state(DummyEthernetContext::new(
DUMMY_CONFIG_V4.local_mac,
Ipv6::MINIMUM_LINK_MTU.into(),
));
<DummyContext as ArpHandler<_, _>>::insert_static_neighbor(
&mut ctx,
DummyDeviceId,
DUMMY_CONFIG_V4.remote_ip.get(),
DUMMY_CONFIG_V4.remote_mac,
);
let _ = send_ip_frame(
&mut ctx,
DummyDeviceId,
DUMMY_CONFIG_V4.remote_ip,
Buf::new(&mut vec![0; size], ..),
);
assert_eq!(ctx.frames().len(), expect_frames_sent);
}
test(Ipv6::MINIMUM_LINK_MTU.into(), 1);
test(usize::from(Ipv6::MINIMUM_LINK_MTU) + 1, 0);
}
#[test]
fn test_pending_frames() {
let mut state = EthernetDeviceStateBuilder::new(
DUMMY_CONFIG_V4.local_mac,
Ipv6::MINIMUM_LINK_MTU.into(),
)
.build::<DummyInstant>();
let ip = IpAddr::V4(DUMMY_CONFIG_V4.local_ip.into_addr());
state.add_pending_frame(ip, Buf::new(vec![1], ..));
state.add_pending_frame(ip, Buf::new(vec![2], ..));
state.add_pending_frame(ip, Buf::new(vec![3], ..));
// check that we're accumulating correctly...
assert_eq!(3, state.take_pending_frames(ip).unwrap().count());
// ...and that take_pending_frames clears all the buffered data.
assert!(state.take_pending_frames(ip).is_none());
for i in 0..ETHERNET_MAX_PENDING_FRAMES {
assert!(state.add_pending_frame(ip, Buf::new(vec![i as u8], ..)).is_none());
}
        // Check that adding beyond capacity drops the oldest buffers first,
        // FIFO-style.
assert_eq!(0, state.add_pending_frame(ip, Buf::new(vec![255], ..)).unwrap().as_ref()[0]);
assert_eq!(1, state.add_pending_frame(ip, Buf::new(vec![255], ..)).unwrap().as_ref()[0]);
assert_eq!(2, state.add_pending_frame(ip, Buf::new(vec![255], ..)).unwrap().as_ref()[0]);
}
#[specialize_ip]
fn test_receive_ip_frame<I: Ip>(initialize: bool) {
//
// Should only receive a frame if the device is initialized
//
let config = I::DUMMY_CONFIG;
let mut ctx = DummyEventDispatcherBuilder::default().build::<DummyEventDispatcher>();
let device =
ctx.state_mut().add_ethernet_device(config.local_mac, Ipv6::MINIMUM_LINK_MTU.into());
#[ipv4]
let mut bytes = dns_request_v4::ETHERNET_FRAME.bytes.to_vec();
#[ipv6]
let mut bytes = dns_request_v6::ETHERNET_FRAME.bytes.to_vec();
let mac_bytes = config.local_mac.bytes();
bytes[0..6].copy_from_slice(&mac_bytes);
if initialize {
crate::device::initialize_device(&mut ctx, device);
}
// Will panic if we do not initialize.
crate::device::receive_frame(&mut ctx, device, Buf::new(bytes, ..));
        // If we did not initialize, we would not reach here since
        // `receive_frame` would have panicked.
#[ipv4]
assert_eq!(get_counter_val(&mut ctx, "receive_ipv4_packet"), 1);
#[ipv6]
assert_eq!(get_counter_val(&mut ctx, "receive_ipv6_packet"), 1);
}
#[ip_test]
#[should_panic(expected = "assertion failed: is_device_initialized(ctx.state(), device)")]
fn receive_frame_uninitialized<I: Ip>() {
test_receive_ip_frame::<I>(false);
}
#[ip_test]
fn receive_frame_initialized<I: Ip>() {
test_receive_ip_frame::<I>(true);
}
#[specialize_ip]
fn test_send_ip_frame<I: Ip>(initialize: bool) {
//
// Should only send a frame if the device is initialized
//
let config = I::DUMMY_CONFIG;
let mut ctx = DummyEventDispatcherBuilder::default().build::<DummyEventDispatcher>();
let device =
ctx.state_mut().add_ethernet_device(config.local_mac, Ipv6::MINIMUM_LINK_MTU.into());
#[ipv4]
let mut bytes = dns_request_v4::ETHERNET_FRAME.bytes.to_vec();
#[ipv6]
let mut bytes = dns_request_v6::ETHERNET_FRAME.bytes.to_vec();
let mac_bytes = config.local_mac.bytes();
bytes[6..12].copy_from_slice(&mac_bytes);
if initialize {
crate::device::initialize_device(&mut ctx, device);
}
// Will panic if we do not initialize.
let _ =
crate::device::send_ip_frame(&mut ctx, device, config.remote_ip, Buf::new(bytes, ..));
}
#[ip_test]
#[should_panic(expected = "assertion failed: is_device_usable(ctx.state(), device)")]
fn test_send_frame_uninitialized<I: Ip>() {
test_send_ip_frame::<I>(false);
}
#[ip_test]
fn test_send_frame_initialized<I: Ip>() {
test_send_ip_frame::<I>(true);
}
#[test]
fn initialize_once() {
let mut ctx = DummyEventDispatcherBuilder::default().build::<DummyEventDispatcher>();
let device = ctx
.state_mut()
.add_ethernet_device(DUMMY_CONFIG_V4.local_mac, Ipv6::MINIMUM_LINK_MTU.into());
crate::device::initialize_device(&mut ctx, device);
}
#[test]
#[should_panic(expected = "assertion failed: state.is_uninitialized()")]
fn initialize_multiple() {
let mut ctx = DummyEventDispatcherBuilder::default().build::<DummyEventDispatcher>();
let device = ctx
.state_mut()
.add_ethernet_device(DUMMY_CONFIG_V4.local_mac, Ipv6::MINIMUM_LINK_MTU.into());
crate::device::initialize_device(&mut ctx, device);
// Should panic since we are already initialized.
crate::device::initialize_device(&mut ctx, device);
}
#[ip_test]
fn test_set_ip_routing<I: Ip + TestIpExt + IcmpIpExt + IpExt>() {
fn check_other_is_routing_enabled<I: Ip>(
ctx: &Context<DummyEventDispatcher>,
device: DeviceId,
expected: bool,
) {
let enabled = match I::VERSION {
IpVersion::V4 => is_routing_enabled::<_, Ipv6>(ctx, device),
IpVersion::V6 => is_routing_enabled::<_, Ipv4>(ctx, device),
};
assert_eq!(enabled, expected);
}
fn check_icmp<I: Ip>(buf: &[u8]) {
match I::VERSION {
IpVersion::V4 => {
let _ = parse_icmp_packet_in_ip_packet_in_ethernet_frame::<
Ipv4,
_,
IcmpDestUnreachable,
_,
>(buf, |_| {})
.unwrap();
}
IpVersion::V6 => {
let _ = parse_icmp_packet_in_ip_packet_in_ethernet_frame::<
Ipv6,
_,
IcmpDestUnreachable,
_,
>(buf, |_| {})
.unwrap();
}
}
}
let src_ip = I::get_other_ip_address(3);
let src_mac = Mac::new([10, 11, 12, 13, 14, 15]);
let config = I::DUMMY_CONFIG;
let device = DeviceId::new_ethernet(0);
let frame_dst = FrameDestination::Unicast;
let mut rng = new_rng(70812476915813);
let mut body: Vec<u8> = std::iter::repeat_with(|| rng.gen()).take(100).collect();
let buf = Buf::new(&mut body[..], ..)
.encapsulate(I::PacketBuilder::new(
src_ip.get(),
config.remote_ip.get(),
64,
IpProto::Tcp,
))
.serialize_vec_outer()
.ok()
.unwrap()
.unwrap_b();
//
        // Test with netstack forwarding disabled
//
let mut builder = DummyEventDispatcherBuilder::from_config(config.clone());
add_arp_or_ndp_table_entry(&mut builder, device.id(), src_ip.get(), src_mac);
let mut ctx = builder.build();
// Should not be a router (default).
assert!(!is_routing_enabled::<_, I>(&ctx, device));
check_other_is_routing_enabled::<I>(&ctx, device, false);
// Receiving a packet not destined for the node should only result in a
// dest unreachable message if routing is enabled.
receive_ip_packet::<_, _, I>(&mut ctx, device, frame_dst, buf.clone());
assert_eq!(ctx.dispatcher().frames_sent().len(), 0);
// Attempting to set router should work, but it still won't be able to
// route packets.
set_routing_enabled::<_, I>(&mut ctx, device, true);
assert!(is_routing_enabled::<_, I>(&ctx, device));
// Should not update other Ip routing status.
check_other_is_routing_enabled::<I>(&ctx, device, false);
receive_ip_packet::<_, _, I>(&mut ctx, device, frame_dst, buf.clone());
// Still should not send ICMP because device has routing disabled.
assert_eq!(ctx.dispatcher().frames_sent().len(), 0);
//
        // Test with netstack forwarding enabled
//
let mut state_builder = StackStateBuilder::default();
state_builder.ipv4_builder().forward(true);
state_builder.ipv6_builder().forward(true);
// Most tests do not need NDP's DAD or router solicitation so disable it here.
let mut ndp_configs = ndp::NdpConfigurations::default();
ndp_configs.set_dup_addr_detect_transmits(None);
ndp_configs.set_max_router_solicitations(None);
state_builder.device_builder().set_default_ndp_configs(ndp_configs);
let mut builder = DummyEventDispatcherBuilder::from_config(config.clone());
add_arp_or_ndp_table_entry(&mut builder, device.id(), src_ip.get(), src_mac);
let mut ctx = builder.build_with(state_builder, DummyEventDispatcher::default());
// Should not be a router (default).
assert!(!is_routing_enabled::<_, I>(&ctx, device));
check_other_is_routing_enabled::<I>(&ctx, device, false);
// Receiving a packet not destined for the node should not result in an
// unreachable message when routing is disabled.
receive_ip_packet::<_, _, I>(&mut ctx, device, frame_dst, buf.clone());
assert_eq!(ctx.dispatcher().frames_sent().len(), 0);
// Attempting to set router should work
set_routing_enabled::<_, I>(&mut ctx, device, true);
assert!(is_routing_enabled::<_, I>(&ctx, device));
// Should not update other Ip routing status.
check_other_is_routing_enabled::<I>(&ctx, device, false);
// Should route the packet since routing fully enabled (netstack & device).
receive_ip_packet::<_, _, I>(&mut ctx, device, frame_dst, buf.clone());
assert_eq!(ctx.dispatcher().frames_sent().len(), 1);
println!("{:?}", buf.as_ref());
println!("{:?}", ctx.dispatcher().frames_sent()[0].1);
let (packet_buf, _, _, packet_src_ip, packet_dst_ip, proto, ttl) =
parse_ip_packet_in_ethernet_frame::<I>(&ctx.dispatcher().frames_sent()[0].1[..])
.unwrap();
assert_eq!(src_ip.get(), packet_src_ip);
assert_eq!(config.remote_ip.get(), packet_dst_ip);
assert_eq!(proto, IpProto::Tcp);
assert_eq!(body, packet_buf);
assert_eq!(ttl, 63);
// Test routing a packet to an unknown address.
let buf_unknown_dest = Buf::new(&mut body[..], ..)
.encapsulate(I::PacketBuilder::new(
src_ip.get(),
// Addr must be remote, otherwise this will cause an NDP/ARP
// request rather than ICMP unreachable.
I::get_other_remote_ip_address(10).get(),
64,
IpProto::Tcp,
))
.serialize_vec_outer()
.ok()
.unwrap()
.unwrap_b();
receive_ip_packet::<_, _, I>(&mut ctx, device, frame_dst, buf_unknown_dest);
assert_eq!(ctx.dispatcher().frames_sent().len(), 2);
check_icmp::<I>(&ctx.dispatcher().frames_sent()[1].1);
// Attempt to unset router
set_routing_enabled::<_, I>(&mut ctx, device, false);
assert!(!is_routing_enabled::<_, I>(&ctx, device));
check_other_is_routing_enabled::<I>(&ctx, device, false);
// Should not route packets anymore
receive_ip_packet::<_, _, I>(&mut ctx, device, frame_dst, buf.clone());
assert_eq!(ctx.dispatcher().frames_sent().len(), 2);
}
#[ip_test]
fn test_promiscuous_mode<I: Ip + TestIpExt + IpExt>() {
//
// Test that frames not destined for a device will still be accepted when
// the device is put into promiscuous mode. In all cases, frames that are
// destined for a device must always be accepted.
//
let config = I::DUMMY_CONFIG;
let mut ctx = DummyEventDispatcherBuilder::from_config(config.clone())
.build::<DummyEventDispatcher>();
let device = DeviceId::new_ethernet(0);
let other_mac = Mac::new([13, 14, 15, 16, 17, 18]);
let buf = Buf::new(Vec::new(), ..)
.encapsulate(I::PacketBuilder::new(
config.remote_ip.get(),
config.local_ip.get(),
64,
IpProto::Tcp,
))
.encapsulate(EthernetFrameBuilder::new(
config.remote_mac,
config.local_mac,
I::ETHER_TYPE,
))
.serialize_vec_outer()
.ok()
.unwrap()
.unwrap_b();
// Accept packet destined for this device if promiscuous mode is off.
crate::device::set_promiscuous_mode(&mut ctx, device, false);
crate::device::receive_frame(&mut ctx, device, buf.clone());
assert_eq!(get_counter_val(&mut ctx, dispatch_receive_ip_packet_name::<I>()), 1);
// Accept packet destined for this device if promiscuous mode is on.
crate::device::set_promiscuous_mode(&mut ctx, device, true);
crate::device::receive_frame(&mut ctx, device, buf.clone());
assert_eq!(get_counter_val(&mut ctx, dispatch_receive_ip_packet_name::<I>()), 2);
let buf = Buf::new(Vec::new(), ..)
.encapsulate(I::PacketBuilder::new(
config.remote_ip.get(),
config.local_ip.get(),
64,
IpProto::Tcp,
))
.encapsulate(EthernetFrameBuilder::new(config.remote_mac, other_mac, I::ETHER_TYPE))
.serialize_vec_outer()
.ok()
.unwrap()
.unwrap_b();
// Reject packet not destined for this device if promiscuous mode is off.
crate::device::set_promiscuous_mode(&mut ctx, device, false);
crate::device::receive_frame(&mut ctx, device, buf.clone());
assert_eq!(get_counter_val(&mut ctx, dispatch_receive_ip_packet_name::<I>()), 2);
// Accept packet not destined for this device if promiscuous mode is on.
crate::device::set_promiscuous_mode(&mut ctx, device, true);
crate::device::receive_frame(&mut ctx, device, buf.clone());
assert_eq!(get_counter_val(&mut ctx, dispatch_receive_ip_packet_name::<I>()), 3);
}
#[ip_test]
fn test_add_remove_ip_addresses<I: Ip + TestIpExt>() {
let config = I::DUMMY_CONFIG;
let mut ctx = DummyEventDispatcherBuilder::default().build::<DummyEventDispatcher>();
let device =
ctx.state_mut().add_ethernet_device(config.local_mac, Ipv6::MINIMUM_LINK_MTU.into());
crate::device::initialize_device(&mut ctx, device);
let ip1 = I::get_other_ip_address(1);
let ip2 = I::get_other_ip_address(2);
let ip3 = I::get_other_ip_address(3);
let prefix = I::Addr::BYTES * 8;
let as1 = AddrSubnet::new(ip1.get(), prefix).unwrap();
let as2 = AddrSubnet::new(ip2.get(), prefix).unwrap();
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip1).is_none());
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip2).is_none());
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip3).is_none());
// Add ip1 (ok)
crate::device::add_ip_addr_subnet(&mut ctx, device, as1).unwrap();
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip1).is_some());
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip2).is_none());
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip3).is_none());
// Add ip2 (ok)
crate::device::add_ip_addr_subnet(&mut ctx, device, as2).unwrap();
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip1).is_some());
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip2).is_some());
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip3).is_none());
// Del ip1 (ok)
crate::device::del_ip_addr(&mut ctx, device, &ip1).unwrap();
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip1).is_none());
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip2).is_some());
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip3).is_none());
// Del ip1 again (ip1 not found)
assert_eq!(
crate::device::del_ip_addr(&mut ctx, device, &ip1).unwrap_err(),
AddressError::NotFound
);
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip1).is_none());
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip2).is_some());
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip3).is_none());
// Add ip2 again (ip2 already exists)
assert_eq!(
crate::device::add_ip_addr_subnet(&mut ctx, device, as2).unwrap_err(),
AddressError::AlreadyExists
);
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip1).is_none());
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip2).is_some());
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip3).is_none());
// Add ip2 with different subnet (ip2 already exists)
assert_eq!(
crate::device::add_ip_addr_subnet(
&mut ctx,
device,
AddrSubnet::new(ip2.get(), prefix - 1).unwrap()
)
.unwrap_err(),
AddressError::AlreadyExists
);
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip1).is_none());
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip2).is_some());
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip3).is_none());
}
fn receive_simple_ip_packet_test<A: IpAddress>(
ctx: &mut Context<DummyEventDispatcher>,
device: DeviceId,
src_ip: A,
dst_ip: A,
expected: usize,
) {
let buf = Buf::new(Vec::new(), ..)
.encapsulate(<A::Version as IpExt>::PacketBuilder::new(
src_ip,
dst_ip,
64,
IpProto::Tcp,
))
.serialize_vec_outer()
.ok()
.unwrap()
.into_inner();
receive_ip_packet::<_, _, A::Version>(ctx, device, FrameDestination::Unicast, buf);
assert_eq!(get_counter_val(ctx, dispatch_receive_ip_packet_name::<A::Version>()), expected);
}
#[ip_test]
fn test_multiple_ip_addresses<I: Ip + TestIpExt>() {
let config = I::DUMMY_CONFIG;
let mut ctx = DummyEventDispatcherBuilder::default().build::<DummyEventDispatcher>();
let device =
ctx.state_mut().add_ethernet_device(config.local_mac, Ipv6::MINIMUM_LINK_MTU.into());
crate::device::initialize_device(&mut ctx, device);
let ip1 = I::get_other_ip_address(1);
let ip2 = I::get_other_ip_address(2);
let from_ip = I::get_other_ip_address(3).get();
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip1).is_none());
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip2).is_none());
// Should not receive packets on any ip.
receive_simple_ip_packet_test(&mut ctx, device, from_ip, ip1.get(), 0);
receive_simple_ip_packet_test(&mut ctx, device, from_ip, ip2.get(), 0);
// Add ip1 to device.
crate::device::add_ip_addr_subnet(
&mut ctx,
device,
AddrSubnet::new(ip1.get(), I::Addr::BYTES * 8).unwrap(),
)
.unwrap();
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip1).unwrap().is_assigned());
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip2).is_none());
// Should receive packets on ip1 but not ip2
receive_simple_ip_packet_test(&mut ctx, device, from_ip, ip1.get(), 1);
receive_simple_ip_packet_test(&mut ctx, device, from_ip, ip2.get(), 1);
// Add ip2 to device.
crate::device::add_ip_addr_subnet(
&mut ctx,
device,
AddrSubnet::new(ip2.get(), I::Addr::BYTES * 8).unwrap(),
)
.unwrap();
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip1).unwrap().is_assigned());
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip2).unwrap().is_assigned());
// Should receive packets on both ips
receive_simple_ip_packet_test(&mut ctx, device, from_ip, ip1.get(), 2);
receive_simple_ip_packet_test(&mut ctx, device, from_ip, ip2.get(), 3);
// Remove ip1
crate::device::del_ip_addr(&mut ctx, device, &ip1).unwrap();
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip1).is_none());
assert!(crate::device::get_ip_addr_state(&ctx, device, &ip2).unwrap().is_assigned());
// Should receive packets on ip2
receive_simple_ip_packet_test(&mut ctx, device, from_ip, ip1.get(), 3);
receive_simple_ip_packet_test(&mut ctx, device, from_ip, ip2.get(), 4);
}
/// Get a multicast address.
#[specialize_ip]
fn get_multicast_addr<I: Ip>() -> MulticastAddr<I::Addr> {
#[ipv4]
return MulticastAddr::new(Ipv4Addr::new([224, 0, 0, 1])).unwrap();
#[ipv6]
return MulticastAddr::new(Ipv6Addr::new([
0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
]))
.unwrap();
}
/// Test that we can join and leave a multicast group, but we only truly leave it after
/// calling `leave_ip_multicast` the same number of times as `join_ip_multicast`.
#[ip_test]
fn test_ip_join_leave_multicast_addr_ref_count<I: Ip + TestIpExt>() {
let config = I::DUMMY_CONFIG;
let mut ctx = DummyEventDispatcherBuilder::default().build::<DummyEventDispatcher>();
let device =
ctx.state_mut().add_ethernet_device(config.local_mac, Ipv6::MINIMUM_LINK_MTU.into());
crate::device::initialize_device(&mut ctx, device);
let multicast_addr = get_multicast_addr::<I>();
// Should not be in the multicast group yet.
assert!(!crate::device::is_in_ip_multicast(&mut ctx, device, multicast_addr));
// Join the multicast group.
crate::device::join_ip_multicast(&mut ctx, device, multicast_addr);
assert!(crate::device::is_in_ip_multicast(&mut ctx, device, multicast_addr));
// Leave the multicast group.
crate::device::leave_ip_multicast(&mut ctx, device, multicast_addr);
assert!(!crate::device::is_in_ip_multicast(&mut ctx, device, multicast_addr));
        // Join the multicast group again.
crate::device::join_ip_multicast(&mut ctx, device, multicast_addr);
assert!(crate::device::is_in_ip_multicast(&mut ctx, device, multicast_addr));
// Join it again...
crate::device::join_ip_multicast(&mut ctx, device, multicast_addr);
assert!(crate::device::is_in_ip_multicast(&mut ctx, device, multicast_addr));
// Leave it (still in it because we joined twice).
crate::device::leave_ip_multicast(&mut ctx, device, multicast_addr);
assert!(crate::device::is_in_ip_multicast(&mut ctx, device, multicast_addr));
// Leave it again... (actually left now).
crate::device::leave_ip_multicast(&mut ctx, device, multicast_addr);
assert!(!crate::device::is_in_ip_multicast(&mut ctx, device, multicast_addr));
}
/// Test leaving a multicast group a device has not yet joined.
///
/// # Panics
///
    /// This test should always panic, since leaving a multicast group that was never joined
    /// is a panic condition.
#[ip_test]
#[should_panic(expected = "attempted to leave IP multicast group we were not a member of:")]
fn test_ip_leave_unjoined_multicast<I: Ip + TestIpExt>() {
let config = I::DUMMY_CONFIG;
let mut ctx = DummyEventDispatcherBuilder::default().build::<DummyEventDispatcher>();
let device =
ctx.state_mut().add_ethernet_device(config.local_mac, Ipv6::MINIMUM_LINK_MTU.into());
crate::device::initialize_device(&mut ctx, device);
let multicast_addr = get_multicast_addr::<I>();
// Should not be in the multicast group yet.
assert!(!crate::device::is_in_ip_multicast(&mut ctx, device, multicast_addr));
// Leave it (this should panic).
crate::device::leave_ip_multicast(&mut ctx, device, multicast_addr);
}
#[test]
fn test_ipv6_duplicate_solicited_node_address() {
//
// Test that we still receive packets destined to a solicited-node multicast address of an
// IP address we deleted because another (distinct) IP address that is still assigned uses
// the same solicited-node multicast address.
//
let config = Ipv6::DUMMY_CONFIG;
let mut ctx = DummyEventDispatcherBuilder::default().build::<DummyEventDispatcher>();
let device =
ctx.state_mut().add_ethernet_device(config.local_mac, Ipv6::MINIMUM_LINK_MTU.into());
crate::device::initialize_device(&mut ctx, device);
let ip1 =
SpecifiedAddr::new(Ipv6Addr::new([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1]))
.unwrap();
let ip2 =
SpecifiedAddr::new(Ipv6Addr::new([0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1]))
.unwrap();
let from_ip = Ipv6Addr::new([0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 1]);
// ip1 and ip2 are not equal but their solicited node addresses are the same.
assert_ne!(ip1, ip2);
assert_eq!(ip1.to_solicited_node_address(), ip2.to_solicited_node_address());
let sn_addr = ip1.to_solicited_node_address().get();
let addr_sub1 = AddrSubnet::new(ip1.get(), 64).unwrap();
let addr_sub2 = AddrSubnet::new(ip2.get(), 64).unwrap();
assert_eq!(get_counter_val(&mut ctx, "dispatch_receive_ip_packet"), 0);
// Add ip1 to the device.
//
// Should get packets destined for the solicited node address and ip1.
crate::device::add_ip_addr_subnet(&mut ctx, device, addr_sub1).unwrap();
receive_simple_ip_packet_test(&mut ctx, device, from_ip, ip1.get(), 1);
receive_simple_ip_packet_test(&mut ctx, device, from_ip, ip2.get(), 1);
receive_simple_ip_packet_test(&mut ctx, device, from_ip, sn_addr, 2);
// Add ip2 to the device.
//
// Should get packets destined for the solicited node address, ip1 and ip2.
crate::device::add_ip_addr_subnet(&mut ctx, device, addr_sub2).unwrap();
receive_simple_ip_packet_test(&mut ctx, device, from_ip, ip1.get(), 3);
receive_simple_ip_packet_test(&mut ctx, device, from_ip, ip2.get(), 4);
receive_simple_ip_packet_test(&mut ctx, device, from_ip, sn_addr, 5);
// Remove ip1 from the device.
//
// Should get packets destined for the solicited node address and ip2.
crate::device::del_ip_addr(&mut ctx, device, &ip1).unwrap();
receive_simple_ip_packet_test(&mut ctx, device, from_ip, ip1.get(), 5);
receive_simple_ip_packet_test(&mut ctx, device, from_ip, ip2.get(), 6);
receive_simple_ip_packet_test(&mut ctx, device, from_ip, sn_addr, 7);
}
#[test]
fn test_get_ip_addr_subnet() {
//
        // Test that `get_ip_addr_subnet` only returns non-link-local IPv6 addresses.
//
let config = Ipv6::DUMMY_CONFIG;
let mut ctx = DummyEventDispatcherBuilder::default().build::<DummyEventDispatcher>();
let device =
ctx.state_mut().add_ethernet_device(config.local_mac, Ipv6::MINIMUM_LINK_MTU.into());
assert_eq!(device.id, 0);
let device = EthernetDeviceId(0);
// `initialize_device` adds the MAC-derived link-local IPv6 address.
initialize_device(&mut ctx, device);
let addr_sub = &ctx.state().device.ethernet.get(0).unwrap().device.ip.ipv6_addr_sub;
// Verify that there is a single assigned address - the MAC-derived link-local.
assert_eq!(addr_sub.len(), 1);
assert_eq!(
addr_sub[0].addr_sub().addr().get(),
config.local_mac.to_ipv6_link_local().addr().get()
);
// Verify that `get_ip_addr_subnet` returns no address since the only
// address present is link-local.
assert_eq!(get_ip_addr_subnet::<_, Ipv6Addr>(&ctx, device), None);
}
#[test]
fn test_add_ip_addr_subnet_link_local() {
//
// Test that `add_ip_addr_subnet` allows link-local addresses.
//
let config = Ipv6::DUMMY_CONFIG;
let mut ctx = DummyEventDispatcherBuilder::default().build::<DummyEventDispatcher>();
let device =
ctx.state_mut().add_ethernet_device(config.local_mac, Ipv6::MINIMUM_LINK_MTU.into());
assert_eq!(device.id, 0);
let device = EthernetDeviceId(0);
initialize_device(&mut ctx, device);
// Verify that there is a single assigned address.
assert_eq!(ctx.state().device.ethernet.get(0).unwrap().device.ip.ipv6_addr_sub.len(), 1);
add_ip_addr_subnet(
&mut ctx,
device,
AddrSubnet::new(Ipv6::LINK_LOCAL_UNICAST_SUBNET.network(), 128).unwrap(),
)
.unwrap();
// Assert that the new address got added.
let addr_sub = &ctx.state().device.ethernet.get(0).unwrap().device.ip.ipv6_addr_sub;
assert_eq!(addr_sub.len(), 2);
assert_eq!(addr_sub[1].addr_sub().addr().get(), Ipv6::LINK_LOCAL_UNICAST_SUBNET.network());
}
}
| 38.309653 | 181 | 0.628557 |
227c88f47cbf374d3ac52e0fbd2abb8dff7325fe | 1,198 | use {
crate::alloc,
alloc::{Alloc, AllocErr},
solana_rbpf::aligned_memory::AlignedMemory,
std::alloc::Layout,
};
#[derive(Debug)]
pub struct BpfAllocator {
#[allow(dead_code)]
heap: AlignedMemory,
start: u64,
len: u64,
pos: u64,
}
impl BpfAllocator {
pub fn new(heap: AlignedMemory, virtual_address: u64) -> Self {
let len = heap.len() as u64;
Self {
heap,
start: virtual_address,
len,
pos: 0,
}
}
}
impl Alloc for BpfAllocator {
fn alloc(&mut self, layout: Layout) -> Result<u64, AllocErr> {
let bytes_to_align = (self.pos as *const u8).align_offset(layout.align()) as u64;
if self
.pos
.saturating_add(layout.size() as u64)
.saturating_add(bytes_to_align)
<= self.len
{
self.pos += bytes_to_align;
let addr = self.start + self.pos;
self.pos += layout.size() as u64;
Ok(addr)
} else {
Err(AllocErr)
}
}
fn dealloc(&mut self, _addr: u64, _layout: Layout) {
// It's a bump allocator, free not supported
}
}
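// A minimal sketch of the bump-allocation behavior (commented out; `heap` is assumed to
// be a 32-byte `AlignedMemory` obtained elsewhere, mapped at virtual address 0x1000):
//
//     let mut allocator = BpfAllocator::new(heap, 0x1000);
//     let layout = Layout::from_size_align(8, 8).unwrap();
//     let a = allocator.alloc(layout).unwrap(); // 0x1000; `pos` bumps to 8
//     let b = allocator.alloc(layout).unwrap(); // 0x1008; strictly sequential
//     allocator.dealloc(a, layout); // no-op: a bump allocator never frees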
| 23.490196 | 89 | 0.534224 |
ef579a6316b2e7ac76a9eafd8097eadfbad8081e | 446 | use shiplift::{ContainerOptions, Docker};
use std::env;
#[tokio::main]
async fn main() {
let docker = Docker::new();
let image = env::args()
.nth(1)
.expect("You need to specify an image name");
match docker
.containers()
.create(&ContainerOptions::builder(image.as_ref()).build())
.await
{
Ok(info) => println!("{:?}", info),
Err(e) => eprintln!("Error: {}", e),
}
}
| 22.3 | 67 | 0.533632 |
71a2401685bfdba5a94367b61713d0a123951dba | 45,048 | use crate::tree_hash::bitfield_bytes_tree_hash_root;
use crate::Error;
use core::marker::PhantomData;
use serde::de::{Deserialize, Deserializer};
use serde::ser::{Serialize, Serializer};
use serde_utils::hex::{encode as hex_encode, PrefixedHexVisitor};
use ssz::{Decode, Encode};
use tree_hash::Hash256;
use typenum::Unsigned;
/// A marker trait applied to `Variable` and `Fixed` that defines the behaviour of a `Bitfield`.
pub trait BitfieldBehaviour: Clone {}
/// A marker struct used to declare SSZ `Variable` behaviour on a `Bitfield`.
///
/// See the [`Bitfield`](struct.Bitfield.html) docs for usage.
#[derive(Clone, PartialEq, Debug)]
pub struct Variable<N> {
_phantom: PhantomData<N>,
}
/// A marker struct used to declare SSZ `Fixed` behaviour on a `Bitfield`.
///
/// See the [`Bitfield`](struct.Bitfield.html) docs for usage.
#[derive(Clone, PartialEq, Debug)]
pub struct Fixed<N> {
_phantom: PhantomData<N>,
}
impl<N: Unsigned + Clone> BitfieldBehaviour for Variable<N> {}
impl<N: Unsigned + Clone> BitfieldBehaviour for Fixed<N> {}
/// A heap-allocated, ordered, variable-length collection of `bool` values, limited to `N` bits.
pub type BitList<N> = Bitfield<Variable<N>>;
/// A heap-allocated, ordered, fixed-length collection of `bool` values, with `N` bits.
///
/// See [Bitfield](struct.Bitfield.html) documentation.
pub type BitVector<N> = Bitfield<Fixed<N>>;
/// A heap-allocated, ordered, fixed-length, collection of `bool` values. Use of
/// [`BitList`](type.BitList.html) or [`BitVector`](type.BitVector.html) type aliases is preferred
/// over direct use of this struct.
///
/// The `T` type parameter is used to define length behaviour with the `Variable` or `Fixed` marker
/// structs.
///
/// The length of the Bitfield is set at instantiation (i.e., runtime, not compile time). However,
/// use with a `Variable` sets a type-level (i.e., compile-time) maximum length and `Fixed`
/// provides a type-level fixed length.
///
/// ## Example
///
/// The example uses the following crate-level type aliases:
///
/// - `BitList<N>` is an alias for `Bitfield<Variable<N>>`
/// - `BitVector<N>` is an alias for `Bitfield<Fixed<N>>`
///
/// ```
/// use ssz_types::{BitVector, BitList, typenum};
///
/// // `BitList` has a type-level maximum length. The length of the list is specified at runtime
/// // and it must be less than or equal to `N`. After instantiation, `BitList` cannot grow or
/// // shrink.
/// type BitList8 = BitList<typenum::U8>;
///
/// // Creating a `BitList` with a larger-than-`N` capacity returns `None`.
/// assert!(BitList8::with_capacity(9).is_err());
///
/// let mut bitlist = BitList8::with_capacity(4).unwrap(); // `BitList` permits a capacity of less than the maximum.
/// assert!(bitlist.set(3, true).is_ok()); // Setting inside the instantiation capacity is permitted.
/// assert!(bitlist.set(5, true).is_err()); // Setting outside that capacity is not.
///
/// // `BitVector` has a type-level fixed length. Unlike `BitList`, it cannot be instantiated with a custom length
/// // or grow/shrink.
/// type BitVector8 = BitVector<typenum::U8>;
///
/// let mut bitvector = BitVector8::new();
/// assert_eq!(bitvector.len(), 8); // `BitVector` length is fixed at the type-level.
/// assert!(bitvector.set(7, true).is_ok()); // Setting inside the capacity is permitted.
/// assert!(bitvector.set(9, true).is_err()); // Setting outside the capacity is not.
///
/// ```
///
/// ## Note
///
/// The internal representation of the bitfield is the same as that required by SSZ. The lowest
/// byte (by `Vec` index) stores the lowest bit-indices and the right-most bit stores the lowest
/// bit-index. E.g., `vec![0b0000_0001, 0b0000_0010]` has bits `0, 9` set.
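///
/// A small sketch of that layout (ignored doctest; `from_raw_bytes` and `get` are the
/// constructor and accessor used elsewhere in this module):
///
/// ```ignore
/// let bitfield: BitList<typenum::U16> =
///     Bitfield::from_raw_bytes(vec![0b0000_0001, 0b0000_0010], 16).unwrap();
/// assert!(bitfield.get(0).unwrap()); // lowest bit of the first byte
/// assert!(bitfield.get(9).unwrap()); // bit 1 of the second byte
/// ```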
#[derive(Clone, Debug, PartialEq)]
pub struct Bitfield<T> {
bytes: Vec<u8>,
len: usize,
_phantom: PhantomData<T>,
}
impl<N: Unsigned + Clone> Bitfield<Variable<N>> {
/// Instantiate with capacity for `num_bits` boolean values. The length cannot be grown or
/// shrunk after instantiation.
///
/// All bits are initialized to `false`.
///
    /// Returns `Err` if `num_bits > N`.
pub fn with_capacity(num_bits: usize) -> Result<Self, Error> {
if num_bits <= N::to_usize() {
Ok(Self {
bytes: vec![0; bytes_for_bit_len(num_bits)],
len: num_bits,
_phantom: PhantomData,
})
} else {
Err(Error::OutOfBounds {
i: Self::max_len(),
len: Self::max_len(),
})
}
}
/// Equal to `N` regardless of the value supplied to `with_capacity`.
pub fn max_len() -> usize {
N::to_usize()
}
/// Consumes `self`, returning a serialized representation.
///
/// The output is faithful to the SSZ encoding of `self`, such that a leading `true` bit is
/// used to indicate the length of the bitfield.
///
/// ## Example
/// ```
/// use ssz_types::{BitList, typenum};
///
/// type BitList8 = BitList<typenum::U8>;
///
/// let b = BitList8::with_capacity(4).unwrap();
///
/// assert_eq!(b.into_bytes(), vec![0b0001_0000]);
/// ```
pub fn into_bytes(self) -> Vec<u8> {
let len = self.len();
let mut bytes = self.bytes;
bytes.resize(bytes_for_bit_len(len + 1), 0);
let mut bitfield: Bitfield<Variable<N>> = Bitfield::from_raw_bytes(bytes, len + 1)
.unwrap_or_else(|_| {
unreachable!(
"Bitfield with {} bytes must have enough capacity for {} bits.",
bytes_for_bit_len(len + 1),
len + 1
)
});
bitfield
.set(len, true)
.expect("len must be in bounds for bitfield.");
bitfield.bytes
}
/// Instantiates a new instance from `bytes`. Consumes the same format that `self.into_bytes()`
/// produces (SSZ).
///
    /// Returns `Err` if `bytes` are not a valid encoding.
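    ///
    /// ## Example
    ///
    /// A round-trip sketch using the same `BitList8` alias as above:
    ///
    /// ```
    /// use ssz_types::{BitList, typenum};
    ///
    /// type BitList8 = BitList<typenum::U8>;
    ///
    /// let b = BitList8::with_capacity(4).unwrap();
    /// let bytes = b.clone().into_bytes();
    /// assert_eq!(BitList8::from_bytes(bytes).unwrap(), b);
    /// ```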
pub fn from_bytes(bytes: Vec<u8>) -> Result<Self, Error> {
let bytes_len = bytes.len();
let mut initial_bitfield: Bitfield<Variable<N>> = {
let num_bits = bytes.len() * 8;
Bitfield::from_raw_bytes(bytes, num_bits)?
};
let len = initial_bitfield
.highest_set_bit()
.ok_or(Error::MissingLengthInformation)?;
// The length bit should be in the last byte, or else it means we have too many bytes.
if len / 8 + 1 != bytes_len {
return Err(Error::InvalidByteCount {
given: bytes_len,
expected: len / 8 + 1,
});
}
if len <= Self::max_len() {
initial_bitfield
.set(len, false)
.expect("Bit has been confirmed to exist");
let mut bytes = initial_bitfield.into_raw_bytes();
bytes.truncate(bytes_for_bit_len(len));
Self::from_raw_bytes(bytes, len)
} else {
Err(Error::OutOfBounds {
i: Self::max_len(),
len: Self::max_len(),
})
}
}
/// Compute the intersection of two BitLists of potentially different lengths.
///
/// Return a new BitList with length equal to the shorter of the two inputs.
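///
/// ## Example (a sketch)
/// ```
/// use ssz_types::{BitList, typenum};
///
/// type BitList8 = BitList<typenum::U8>;
///
/// let a = BitList8::from_bytes(vec![0b0001_0110]).unwrap(); // len 4; bits 1 and 2 set.
/// let b = BitList8::from_bytes(vec![0b0000_0111]).unwrap(); // len 2; bits 0 and 1 set.
/// let c = a.intersection(&b);
/// assert_eq!(c.len(), 2);
/// assert_eq!(c.into_raw_bytes(), vec![0b0000_0010]); // Only bit 1 is set in both.
/// ```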
pub fn intersection(&self, other: &Self) -> Self {
let min_len = std::cmp::min(self.len(), other.len());
let mut result = Self::with_capacity(min_len).expect("min len is always <= N");
// Bitwise-and the bytes together, starting from the left of each vector. This takes care
// of masking out any entries beyond `min_len` as well, assuming the bitfield doesn't
// contain any set bits beyond its length.
for i in 0..result.bytes.len() {
result.bytes[i] = self.bytes[i] & other.bytes[i];
}
result
}
/// Compute the union of two BitLists of potentially different lengths.
///
/// Return a new BitList with length equal to the longer of the two inputs.
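///
/// ## Example (a sketch)
/// ```
/// use ssz_types::{BitList, typenum};
///
/// type BitList8 = BitList<typenum::U8>;
///
/// let a = BitList8::from_bytes(vec![0b0001_0110]).unwrap(); // len 4; bits 1 and 2 set.
/// let b = BitList8::from_bytes(vec![0b0000_0111]).unwrap(); // len 2; bits 0 and 1 set.
/// let c = a.union(&b);
/// assert_eq!(c.len(), 4);
/// assert_eq!(c.into_raw_bytes(), vec![0b0000_0111]); // Bits 0, 1 and 2 are set.
/// ```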
pub fn union(&self, other: &Self) -> Self {
let max_len = std::cmp::max(self.len(), other.len());
let mut result = Self::with_capacity(max_len).expect("max len is always <= N");
for i in 0..result.bytes.len() {
result.bytes[i] =
self.bytes.get(i).copied().unwrap_or(0) | other.bytes.get(i).copied().unwrap_or(0);
}
result
}
}
impl<N: Unsigned + Clone> Bitfield<Fixed<N>> {
/// Instantiate a new `Bitfield` with a fixed-length of `N` bits.
///
/// All bits are initialized to `false`.
pub fn new() -> Self {
Self {
bytes: vec![0; bytes_for_bit_len(Self::capacity())],
len: Self::capacity(),
_phantom: PhantomData,
}
}
/// Returns `N`, the number of bits in `Self`.
pub fn capacity() -> usize {
N::to_usize()
}
/// Consumes `self`, returning a serialized representation.
///
/// The output is faithful to the SSZ encoding of `self`.
///
/// ## Example
/// ```
/// use ssz_types::{BitVector, typenum};
///
/// type BitVector4 = BitVector<typenum::U4>;
///
/// assert_eq!(BitVector4::new().into_bytes(), vec![0b0000_0000]);
/// ```
pub fn into_bytes(self) -> Vec<u8> {
self.into_raw_bytes()
}
/// Instantiates a new instance from `bytes`. Consumes the same format that `self.into_bytes()`
/// produces (SSZ).
///
/// Returns `Err` if `bytes` are not a valid encoding.
pub fn from_bytes(bytes: Vec<u8>) -> Result<Self, Error> {
Self::from_raw_bytes(bytes, Self::capacity())
}
/// Compute the intersection of two fixed-length `Bitfield`s.
///
/// Return a new fixed-length `Bitfield`.
pub fn intersection(&self, other: &Self) -> Self {
let mut result = Self::new();
// Bitwise-and the bytes together, starting from the left of each vector. This takes care
// of masking out any entries beyond `min_len` as well, assuming the bitfield doesn't
// contain any set bits beyond its length.
for i in 0..result.bytes.len() {
result.bytes[i] = self.bytes[i] & other.bytes[i];
}
result
}
/// Compute the union of two fixed-length `Bitfield`s.
///
/// Return a new fixed-length `Bitfield`.
pub fn union(&self, other: &Self) -> Self {
let mut result = Self::new();
for i in 0..result.bytes.len() {
result.bytes[i] =
self.bytes.get(i).copied().unwrap_or(0) | other.bytes.get(i).copied().unwrap_or(0);
}
result
}
}
impl<N: Unsigned + Clone> Default for Bitfield<Fixed<N>> {
fn default() -> Self {
Self::new()
}
}
impl<T: BitfieldBehaviour> Bitfield<T> {
/// Sets the `i`'th bit to `value`.
///
/// Returns `Err` if `i` is out-of-bounds of `self`.
pub fn set(&mut self, i: usize, value: bool) -> Result<(), Error> {
let len = self.len;
if i < len {
let byte = self
.bytes
.get_mut(i / 8)
.ok_or(Error::OutOfBounds { i, len })?;
if value {
*byte |= 1 << (i % 8)
} else {
*byte &= !(1 << (i % 8))
}
Ok(())
} else {
Err(Error::OutOfBounds { i, len: self.len })
}
}
/// Returns the value of the `i`'th bit.
///
/// Returns `Err` if `i` is out-of-bounds of `self`.
pub fn get(&self, i: usize) -> Result<bool, Error> {
if i < self.len {
let byte = self
.bytes
.get(i / 8)
.ok_or(Error::OutOfBounds { i, len: self.len })?;
Ok(*byte & 1 << (i % 8) > 0)
} else {
Err(Error::OutOfBounds { i, len: self.len })
}
}
/// Returns the number of bits stored in `self`.
pub fn len(&self) -> usize {
self.len
}
/// Returns `true` if `self.len() == 0`.
pub fn is_empty(&self) -> bool {
self.len == 0
}
/// Returns the underlying bytes representation of the bitfield.
pub fn into_raw_bytes(self) -> Vec<u8> {
self.bytes
}
/// Returns a view into the underlying bytes representation of the bitfield.
pub fn as_slice(&self) -> &[u8] {
&self.bytes
}
/// Instantiates from the given `bytes`, which are the same format as output from
/// `self.into_raw_bytes()`.
///
/// Returns `Err` if:
///
/// - `bytes` is not the minimal required bytes to represent a bitfield of `bit_len` bits.
/// - `bit_len` is not a multiple of 8 and `bytes` contains set bits that are higher than, or
/// equal to `bit_len`.
fn from_raw_bytes(bytes: Vec<u8>, bit_len: usize) -> Result<Self, Error> {
if bit_len == 0 {
if bytes.len() == 1 && bytes == [0] {
// A bitfield with `bit_len` 0 can only be represented by a single zero byte.
Ok(Self {
bytes,
len: 0,
_phantom: PhantomData,
})
} else {
Err(Error::ExcessBits)
}
} else if bytes.len() != bytes_for_bit_len(bit_len) {
// The number of bytes must be the minimum required to represent `bit_len`.
Err(Error::InvalidByteCount {
given: bytes.len(),
expected: bytes_for_bit_len(bit_len),
})
} else {
// Ensure there are no bits higher than `bit_len` that are set to true.
// When `bit_len % 8 == 0` the shift amount is 8, which overflows for `u8`;
// `overflowing_shr` then wraps the shift to 0, leaving the mask as `0xff` (every bit of
// the last byte is in-bounds), which is the desired behaviour.
let (mask, _) = u8::max_value().overflowing_shr(8 - (bit_len as u32 % 8));
if (bytes.last().expect("Guarded against empty bytes") & !mask) == 0 {
Ok(Self {
bytes,
len: bit_len,
_phantom: PhantomData,
})
} else {
Err(Error::ExcessBits)
}
}
}
/// Returns `Some(i)`, where `i` is the highest index with a set bit. Returns `None` if there
/// are no set bits.
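///
/// ## Example (a sketch)
/// ```
/// use ssz_types::{BitList, typenum};
///
/// let b = BitList::<typenum::U8>::from_bytes(vec![0b0001_0100]).unwrap();
/// // Bit 2 is the only (and therefore highest) set bit.
/// assert_eq!(b.highest_set_bit(), Some(2));
/// ```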
pub fn highest_set_bit(&self) -> Option<usize> {
self.bytes
.iter()
.enumerate()
.rev()
.find(|(_, byte)| **byte > 0)
.map(|(i, byte)| i * 8 + 7 - byte.leading_zeros() as usize)
}
/// Returns an iterator across bitfield `bool` values, starting at the lowest index.
pub fn iter(&self) -> BitIter<'_, T> {
BitIter {
bitfield: self,
i: 0,
}
}
/// Returns true if no bits are set.
pub fn is_zero(&self) -> bool {
self.bytes.iter().all(|byte| *byte == 0)
}
/// Returns the number of bits that are set to `true`.
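///
/// ## Example (a sketch)
/// ```
/// use ssz_types::{BitList, typenum};
///
/// let b = BitList::<typenum::U8>::from_bytes(vec![0b0001_0101]).unwrap();
/// // Bits 0 and 2 are set.
/// assert_eq!(b.num_set_bits(), 2);
/// ```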
pub fn num_set_bits(&self) -> usize {
self.bytes
.iter()
.map(|byte| byte.count_ones() as usize)
.sum()
}
/// Compute the difference of this Bitfield and another of potentially different length.
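///
/// ## Example (a sketch)
/// ```
/// use ssz_types::{BitList, typenum};
///
/// let a = BitList::<typenum::U8>::from_bytes(vec![0b0001_0110]).unwrap(); // bits 1 and 2
/// let b = BitList::<typenum::U8>::from_bytes(vec![0b0001_0010]).unwrap(); // bit 1
/// // Bits set in `a` but not in `b`.
/// assert_eq!(a.difference(&b).into_raw_bytes(), vec![0b0000_0100]);
/// ```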
pub fn difference(&self, other: &Self) -> Self {
let mut result = self.clone();
result.difference_inplace(other);
result
}
/// Compute the difference of this Bitfield and another of potentially different length.
pub fn difference_inplace(&mut self, other: &Self) {
let min_byte_len = std::cmp::min(self.bytes.len(), other.bytes.len());
for i in 0..min_byte_len {
self.bytes[i] &= !other.bytes[i];
}
}
/// Shift the bits to higher indices, filling the lower indices with zeroes.
///
/// The amount to shift by, `n`, must be less than or equal to `self.len()`.
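///
/// ## Example (a sketch)
/// ```
/// use ssz_types::{BitList, typenum};
///
/// // A 4-bit list with bits 0 and 1 set (raw bytes `0b0000_0011`).
/// let mut b = BitList::<typenum::U8>::from_bytes(vec![0b0001_0011]).unwrap();
/// b.shift_up(1).unwrap();
/// // Bits 1 and 2 are now set; bit 0 has been zero-filled.
/// assert_eq!(b.into_raw_bytes(), vec![0b0000_0110]);
/// ```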
pub fn shift_up(&mut self, n: usize) -> Result<(), Error> {
if n <= self.len() {
// Shift the bits up (starting from the high indices to avoid overwriting)
for i in (n..self.len()).rev() {
self.set(i, self.get(i - n)?)?;
}
// Zero the low bits
for i in 0..n {
self.set(i, false).unwrap();
}
Ok(())
} else {
Err(Error::OutOfBounds {
i: n,
len: self.len(),
})
}
}
}
/// Returns the minimum required bytes to represent a given number of bits.
///
/// `bit_len == 0` requires a single byte.
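///
/// For example: `bytes_for_bit_len(0) == 1`, `bytes_for_bit_len(8) == 1` and
/// `bytes_for_bit_len(9) == 2`.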
fn bytes_for_bit_len(bit_len: usize) -> usize {
std::cmp::max(1, (bit_len + 7) / 8)
}
/// An iterator over the bits in a `Bitfield`.
pub struct BitIter<'a, T> {
bitfield: &'a Bitfield<T>,
i: usize,
}
impl<'a, T: BitfieldBehaviour> Iterator for BitIter<'a, T> {
type Item = bool;
fn next(&mut self) -> Option<Self::Item> {
let res = self.bitfield.get(self.i).ok()?;
self.i += 1;
Some(res)
}
}
impl<N: Unsigned + Clone> Encode for Bitfield<Variable<N>> {
fn is_ssz_fixed_len() -> bool {
false
}
fn ssz_bytes_len(&self) -> usize {
// We could likely do better than turning this into bytes and reading the length; however,
// it is kept this way for simplicity.
self.clone().into_bytes().len()
}
fn ssz_append(&self, buf: &mut Vec<u8>) {
buf.append(&mut self.clone().into_bytes())
}
}
impl<N: Unsigned + Clone> Decode for Bitfield<Variable<N>> {
fn is_ssz_fixed_len() -> bool {
false
}
fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
Self::from_bytes(bytes.to_vec()).map_err(|e| {
ssz::DecodeError::BytesInvalid(format!("BitList failed to decode: {:?}", e))
})
}
}
impl<N: Unsigned + Clone> Encode for Bitfield<Fixed<N>> {
fn is_ssz_fixed_len() -> bool {
true
}
fn ssz_bytes_len(&self) -> usize {
self.as_slice().len()
}
fn ssz_fixed_len() -> usize {
bytes_for_bit_len(N::to_usize())
}
fn ssz_append(&self, buf: &mut Vec<u8>) {
buf.append(&mut self.clone().into_bytes())
}
}
impl<N: Unsigned + Clone> Decode for Bitfield<Fixed<N>> {
fn is_ssz_fixed_len() -> bool {
true
}
fn ssz_fixed_len() -> usize {
bytes_for_bit_len(N::to_usize())
}
fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
Self::from_bytes(bytes.to_vec()).map_err(|e| {
ssz::DecodeError::BytesInvalid(format!("BitVector failed to decode: {:?}", e))
})
}
}
impl<N: Unsigned + Clone> Serialize for Bitfield<Variable<N>> {
/// Serde serialization is compliant with the Ethereum YAML test format.
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&hex_encode(self.as_ssz_bytes()))
}
}
impl<'de, N: Unsigned + Clone> Deserialize<'de> for Bitfield<Variable<N>> {
/// Serde deserialization is compliant with the Ethereum YAML test format.
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?;
Self::from_ssz_bytes(&bytes)
.map_err(|e| serde::de::Error::custom(format!("Bitfield {:?}", e)))
}
}
impl<N: Unsigned + Clone> Serialize for Bitfield<Fixed<N>> {
/// Serde serialization is compliant with the Ethereum YAML test format.
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&hex_encode(self.as_ssz_bytes()))
}
}
impl<'de, N: Unsigned + Clone> Deserialize<'de> for Bitfield<Fixed<N>> {
/// Serde deserialization is compliant with the Ethereum YAML test format.
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?;
Self::from_ssz_bytes(&bytes)
.map_err(|e| serde::de::Error::custom(format!("Bitfield {:?}", e)))
}
}
impl<N: Unsigned + Clone> tree_hash::TreeHash for Bitfield<Variable<N>> {
fn tree_hash_type() -> tree_hash::TreeHashType {
tree_hash::TreeHashType::List
}
fn tree_hash_packed_encoding(&self) -> Vec<u8> {
unreachable!("List should never be packed.")
}
fn tree_hash_packing_factor() -> usize {
unreachable!("List should never be packed.")
}
fn tree_hash_root(&self) -> Hash256 {
// Note: we use `as_slice` because it does _not_ have the length-delimiting bit set (or
// present).
let root = bitfield_bytes_tree_hash_root::<N>(self.as_slice());
tree_hash::mix_in_length(&root, self.len())
}
}
impl<N: Unsigned + Clone> tree_hash::TreeHash for Bitfield<Fixed<N>> {
fn tree_hash_type() -> tree_hash::TreeHashType {
tree_hash::TreeHashType::Vector
}
fn tree_hash_packed_encoding(&self) -> Vec<u8> {
unreachable!("Vector should never be packed.")
}
fn tree_hash_packing_factor() -> usize {
unreachable!("Vector should never be packed.")
}
fn tree_hash_root(&self) -> Hash256 {
bitfield_bytes_tree_hash_root::<N>(self.as_slice())
}
}
#[cfg(feature = "arbitrary")]
impl<N: 'static + Unsigned> arbitrary::Arbitrary for Bitfield<Fixed<N>> {
fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
// `from_bytes` expects the minimal number of bytes required to hold `N` bits (not `N`
// bytes), so size the buffer with `bytes_for_bit_len`. Buffers with excess bits set
// beyond `N` are rejected as `IncorrectFormat`.
let size = bytes_for_bit_len(N::to_usize());
let mut vec: Vec<u8> = vec![0u8; size];
u.fill_buffer(&mut vec)?;
Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat)
}
}
#[cfg(feature = "arbitrary")]
impl<N: 'static + Unsigned> arbitrary::Arbitrary for Bitfield<Variable<N>> {
fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
let max_size = N::to_usize();
let rand = usize::arbitrary(u)?;
// `from_bytes` consumes SSZ-encoded bytes (the data plus a length-marker bit), so cap
// the buffer at the byte length of the largest valid encoding; buffers that fail to
// decode are rejected as `IncorrectFormat`.
let size = std::cmp::min(std::cmp::max(1, rand), bytes_for_bit_len(max_size + 1));
let mut vec: Vec<u8> = vec![0u8; size];
u.fill_buffer(&mut vec)?;
Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat)
}
}
#[cfg(test)]
mod bitvector {
use super::*;
use crate::BitVector;
pub type BitVector0 = BitVector<typenum::U0>;
pub type BitVector1 = BitVector<typenum::U1>;
pub type BitVector4 = BitVector<typenum::U4>;
pub type BitVector8 = BitVector<typenum::U8>;
pub type BitVector16 = BitVector<typenum::U16>;
pub type BitVector64 = BitVector<typenum::U64>;
#[test]
fn ssz_encode() {
assert_eq!(BitVector0::new().as_ssz_bytes(), vec![0b0000_0000]);
assert_eq!(BitVector1::new().as_ssz_bytes(), vec![0b0000_0000]);
assert_eq!(BitVector4::new().as_ssz_bytes(), vec![0b0000_0000]);
assert_eq!(BitVector8::new().as_ssz_bytes(), vec![0b0000_0000]);
assert_eq!(
BitVector16::new().as_ssz_bytes(),
vec![0b0000_0000, 0b0000_0000]
);
let mut b = BitVector8::new();
for i in 0..8 {
b.set(i, true).unwrap();
}
assert_eq!(b.as_ssz_bytes(), vec![255]);
let mut b = BitVector4::new();
for i in 0..4 {
b.set(i, true).unwrap();
}
assert_eq!(b.as_ssz_bytes(), vec![0b0000_1111]);
}
#[test]
fn ssz_decode() {
assert!(BitVector0::from_ssz_bytes(&[0b0000_0000]).is_ok());
assert!(BitVector0::from_ssz_bytes(&[0b0000_0001]).is_err());
assert!(BitVector0::from_ssz_bytes(&[0b0000_0010]).is_err());
assert!(BitVector1::from_ssz_bytes(&[0b0000_0001]).is_ok());
assert!(BitVector1::from_ssz_bytes(&[0b0000_0010]).is_err());
assert!(BitVector1::from_ssz_bytes(&[0b0000_0100]).is_err());
assert!(BitVector1::from_ssz_bytes(&[0b0000_0000, 0b0000_0000]).is_err());
assert!(BitVector8::from_ssz_bytes(&[0b0000_0000]).is_ok());
assert!(BitVector8::from_ssz_bytes(&[1, 0b0000_0000]).is_err());
assert!(BitVector8::from_ssz_bytes(&[0b0000_0000, 1]).is_err());
assert!(BitVector8::from_ssz_bytes(&[0b0000_0001]).is_ok());
assert!(BitVector8::from_ssz_bytes(&[0b0000_0010]).is_ok());
assert!(BitVector8::from_ssz_bytes(&[0b0000_0100, 0b0000_0001]).is_err());
assert!(BitVector8::from_ssz_bytes(&[0b0000_0100, 0b0000_0010]).is_err());
assert!(BitVector8::from_ssz_bytes(&[0b0000_0100, 0b0000_0100]).is_err());
assert!(BitVector16::from_ssz_bytes(&[0b0000_0000]).is_err());
assert!(BitVector16::from_ssz_bytes(&[0b0000_0000, 0b0000_0000]).is_ok());
assert!(BitVector16::from_ssz_bytes(&[1, 0b0000_0000, 0b0000_0000]).is_err());
}
#[test]
fn intersection() {
let a = BitVector16::from_raw_bytes(vec![0b1100, 0b0001], 16).unwrap();
let b = BitVector16::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap();
let c = BitVector16::from_raw_bytes(vec![0b1000, 0b0001], 16).unwrap();
assert_eq!(a.intersection(&b), c);
assert_eq!(b.intersection(&a), c);
assert_eq!(a.intersection(&c), c);
assert_eq!(b.intersection(&c), c);
assert_eq!(a.intersection(&a), a);
assert_eq!(b.intersection(&b), b);
assert_eq!(c.intersection(&c), c);
}
#[test]
fn intersection_diff_length() {
let a = BitVector16::from_bytes(vec![0b0010_1110, 0b0010_1011]).unwrap();
let b = BitVector16::from_bytes(vec![0b0010_1101, 0b0000_0001]).unwrap();
let c = BitVector16::from_bytes(vec![0b0010_1100, 0b0000_0001]).unwrap();
assert_eq!(a.len(), 16);
assert_eq!(b.len(), 16);
assert_eq!(c.len(), 16);
assert_eq!(a.intersection(&b), c);
assert_eq!(b.intersection(&a), c);
}
#[test]
fn union() {
let a = BitVector16::from_raw_bytes(vec![0b1100, 0b0001], 16).unwrap();
let b = BitVector16::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap();
let c = BitVector16::from_raw_bytes(vec![0b1111, 0b1001], 16).unwrap();
assert_eq!(a.union(&b), c);
assert_eq!(b.union(&a), c);
assert_eq!(a.union(&a), a);
assert_eq!(b.union(&b), b);
assert_eq!(c.union(&c), c);
}
#[test]
fn union_diff_length() {
let a = BitVector16::from_bytes(vec![0b0010_1011, 0b0010_1110]).unwrap();
let b = BitVector16::from_bytes(vec![0b0000_0001, 0b0010_1101]).unwrap();
let c = BitVector16::from_bytes(vec![0b0010_1011, 0b0010_1111]).unwrap();
assert_eq!(a.len(), c.len());
assert_eq!(a.union(&b), c);
assert_eq!(b.union(&a), c);
}
#[test]
fn ssz_round_trip() {
assert_round_trip(BitVector0::new());
let mut b = BitVector1::new();
b.set(0, true).unwrap();
assert_round_trip(b);
let mut b = BitVector8::new();
for j in 0..8 {
if j % 2 == 0 {
b.set(j, true).unwrap();
}
}
assert_round_trip(b);
let mut b = BitVector8::new();
for j in 0..8 {
b.set(j, true).unwrap();
}
assert_round_trip(b);
let mut b = BitVector16::new();
for j in 0..16 {
if j % 2 == 0 {
b.set(j, true).unwrap();
}
}
assert_round_trip(b);
let mut b = BitVector16::new();
for j in 0..16 {
b.set(j, true).unwrap();
}
assert_round_trip(b);
}
fn assert_round_trip<T: Encode + Decode + PartialEq + std::fmt::Debug>(t: T) {
assert_eq!(T::from_ssz_bytes(&t.as_ssz_bytes()).unwrap(), t);
}
#[test]
fn ssz_bytes_len() {
for i in 0..64 {
let mut bitfield = BitVector64::new();
for j in 0..i {
bitfield.set(j, true).expect("should set bit in bounds");
}
let bytes = bitfield.as_ssz_bytes();
assert_eq!(bitfield.ssz_bytes_len(), bytes.len(), "i = {}", i);
}
}
#[test]
fn excess_bits_nimbus() {
let bad = vec![0b0001_1111];
assert!(BitVector4::from_ssz_bytes(&bad).is_err());
}
}
#[cfg(test)]
#[allow(clippy::cognitive_complexity)]
mod bitlist {
use super::*;
use crate::BitList;
pub type BitList0 = BitList<typenum::U0>;
pub type BitList1 = BitList<typenum::U1>;
pub type BitList8 = BitList<typenum::U8>;
pub type BitList16 = BitList<typenum::U16>;
pub type BitList1024 = BitList<typenum::U1024>;
#[test]
fn ssz_encode() {
assert_eq!(
BitList0::with_capacity(0).unwrap().as_ssz_bytes(),
vec![0b0000_0001],
);
assert_eq!(
BitList1::with_capacity(0).unwrap().as_ssz_bytes(),
vec![0b0000_0001],
);
assert_eq!(
BitList1::with_capacity(1).unwrap().as_ssz_bytes(),
vec![0b0000_0010],
);
assert_eq!(
BitList8::with_capacity(8).unwrap().as_ssz_bytes(),
vec![0b0000_0000, 0b0000_0001],
);
assert_eq!(
BitList8::with_capacity(7).unwrap().as_ssz_bytes(),
vec![0b1000_0000]
);
let mut b = BitList8::with_capacity(8).unwrap();
for i in 0..8 {
b.set(i, true).unwrap();
}
assert_eq!(b.as_ssz_bytes(), vec![255, 0b0000_0001]);
let mut b = BitList8::with_capacity(8).unwrap();
for i in 0..4 {
b.set(i, true).unwrap();
}
assert_eq!(b.as_ssz_bytes(), vec![0b0000_1111, 0b0000_0001]);
assert_eq!(
BitList16::with_capacity(16).unwrap().as_ssz_bytes(),
vec![0b0000_0000, 0b0000_0000, 0b0000_0001]
);
}
#[test]
fn ssz_decode() {
assert!(BitList0::from_ssz_bytes(&[]).is_err());
assert!(BitList1::from_ssz_bytes(&[]).is_err());
assert!(BitList8::from_ssz_bytes(&[]).is_err());
assert!(BitList16::from_ssz_bytes(&[]).is_err());
assert!(BitList0::from_ssz_bytes(&[0b0000_0000]).is_err());
assert!(BitList1::from_ssz_bytes(&[0b0000_0000, 0b0000_0000]).is_err());
assert!(BitList8::from_ssz_bytes(&[0b0000_0000]).is_err());
assert!(BitList16::from_ssz_bytes(&[0b0000_0000]).is_err());
assert!(BitList0::from_ssz_bytes(&[0b0000_0001]).is_ok());
assert!(BitList0::from_ssz_bytes(&[0b0000_0010]).is_err());
assert!(BitList1::from_ssz_bytes(&[0b0000_0001]).is_ok());
assert!(BitList1::from_ssz_bytes(&[0b0000_0010]).is_ok());
assert!(BitList1::from_ssz_bytes(&[0b0000_0100]).is_err());
assert!(BitList8::from_ssz_bytes(&[0b0000_0001]).is_ok());
assert!(BitList8::from_ssz_bytes(&[0b0000_0010]).is_ok());
assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0001]).is_ok());
assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0010]).is_err());
assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0100]).is_err());
}
#[test]
fn ssz_decode_extra_bytes() {
assert!(BitList0::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err());
assert!(BitList1::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err());
assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err());
assert!(BitList16::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err());
assert!(BitList1024::from_ssz_bytes(&[0b1000_0000, 0]).is_err());
assert!(BitList1024::from_ssz_bytes(&[0b1000_0000, 0, 0]).is_err());
assert!(BitList1024::from_ssz_bytes(&[0b1000_0000, 0, 0, 0, 0]).is_err());
}
#[test]
fn ssz_round_trip() {
assert_round_trip(BitList0::with_capacity(0).unwrap());
for i in 0..2 {
assert_round_trip(BitList1::with_capacity(i).unwrap());
}
for i in 0..9 {
assert_round_trip(BitList8::with_capacity(i).unwrap());
}
for i in 0..17 {
assert_round_trip(BitList16::with_capacity(i).unwrap());
}
let mut b = BitList1::with_capacity(1).unwrap();
b.set(0, true).unwrap();
assert_round_trip(b);
for i in 0..8 {
let mut b = BitList8::with_capacity(i).unwrap();
for j in 0..i {
if j % 2 == 0 {
b.set(j, true).unwrap();
}
}
assert_round_trip(b);
let mut b = BitList8::with_capacity(i).unwrap();
for j in 0..i {
b.set(j, true).unwrap();
}
assert_round_trip(b);
}
for i in 0..16 {
let mut b = BitList16::with_capacity(i).unwrap();
for j in 0..i {
if j % 2 == 0 {
b.set(j, true).unwrap();
}
}
assert_round_trip(b);
let mut b = BitList16::with_capacity(i).unwrap();
for j in 0..i {
b.set(j, true).unwrap();
}
assert_round_trip(b);
}
}
fn assert_round_trip<T: Encode + Decode + PartialEq + std::fmt::Debug>(t: T) {
assert_eq!(T::from_ssz_bytes(&t.as_ssz_bytes()).unwrap(), t);
}
#[test]
fn from_raw_bytes() {
assert!(BitList1024::from_raw_bytes(vec![0b0000_0000], 0).is_ok());
assert!(BitList1024::from_raw_bytes(vec![0b0000_0001], 1).is_ok());
assert!(BitList1024::from_raw_bytes(vec![0b0000_0011], 2).is_ok());
assert!(BitList1024::from_raw_bytes(vec![0b0000_0111], 3).is_ok());
assert!(BitList1024::from_raw_bytes(vec![0b0000_1111], 4).is_ok());
assert!(BitList1024::from_raw_bytes(vec![0b0001_1111], 5).is_ok());
assert!(BitList1024::from_raw_bytes(vec![0b0011_1111], 6).is_ok());
assert!(BitList1024::from_raw_bytes(vec![0b0111_1111], 7).is_ok());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111], 8).is_ok());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_0001], 9).is_ok());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_0011], 10).is_ok());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_0111], 11).is_ok());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_1111], 12).is_ok());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0001_1111], 13).is_ok());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0011_1111], 14).is_ok());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0111_1111], 15).is_ok());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b1111_1111], 16).is_ok());
for i in 0..8 {
assert!(BitList1024::from_raw_bytes(vec![], i).is_err());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111], i).is_err());
assert!(BitList1024::from_raw_bytes(vec![0b0000_0000, 0b1111_1110], i).is_err());
}
assert!(BitList1024::from_raw_bytes(vec![0b0000_0001], 0).is_err());
assert!(BitList1024::from_raw_bytes(vec![0b0000_0001], 0).is_err());
assert!(BitList1024::from_raw_bytes(vec![0b0000_0011], 1).is_err());
assert!(BitList1024::from_raw_bytes(vec![0b0000_0111], 2).is_err());
assert!(BitList1024::from_raw_bytes(vec![0b0000_1111], 3).is_err());
assert!(BitList1024::from_raw_bytes(vec![0b0001_1111], 4).is_err());
assert!(BitList1024::from_raw_bytes(vec![0b0011_1111], 5).is_err());
assert!(BitList1024::from_raw_bytes(vec![0b0111_1111], 6).is_err());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111], 7).is_err());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_0001], 8).is_err());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_0011], 9).is_err());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_0111], 10).is_err());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_1111], 11).is_err());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0001_1111], 12).is_err());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0011_1111], 13).is_err());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0111_1111], 14).is_err());
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b1111_1111], 15).is_err());
}
fn test_set_unset(num_bits: usize) {
let mut bitfield = BitList1024::with_capacity(num_bits).unwrap();
for i in 0..=num_bits {
if i < num_bits {
// Starts as false
assert_eq!(bitfield.get(i), Ok(false));
// Can be set true.
assert!(bitfield.set(i, true).is_ok());
assert_eq!(bitfield.get(i), Ok(true));
// Can be set false
assert!(bitfield.set(i, false).is_ok());
assert_eq!(bitfield.get(i), Ok(false));
} else {
assert!(bitfield.get(i).is_err());
assert!(bitfield.set(i, true).is_err());
assert!(bitfield.get(i).is_err());
}
}
}
fn test_bytes_round_trip(num_bits: usize) {
for i in 0..num_bits {
let mut bitfield = BitList1024::with_capacity(num_bits).unwrap();
bitfield.set(i, true).unwrap();
let bytes = bitfield.clone().into_raw_bytes();
assert_eq!(bitfield, Bitfield::from_raw_bytes(bytes, num_bits).unwrap());
}
}
#[test]
fn set_unset() {
for i in 0..8 * 5 {
test_set_unset(i)
}
}
#[test]
fn bytes_round_trip() {
for i in 0..8 * 5 {
test_bytes_round_trip(i)
}
}
#[test]
fn into_raw_bytes() {
let mut bitfield = BitList1024::with_capacity(9).unwrap();
bitfield.set(0, true).unwrap();
assert_eq!(
bitfield.clone().into_raw_bytes(),
vec![0b0000_0001, 0b0000_0000]
);
bitfield.set(1, true).unwrap();
assert_eq!(
bitfield.clone().into_raw_bytes(),
vec![0b0000_0011, 0b0000_0000]
);
bitfield.set(2, true).unwrap();
assert_eq!(
bitfield.clone().into_raw_bytes(),
vec![0b0000_0111, 0b0000_0000]
);
bitfield.set(3, true).unwrap();
assert_eq!(
bitfield.clone().into_raw_bytes(),
vec![0b0000_1111, 0b0000_0000]
);
bitfield.set(4, true).unwrap();
assert_eq!(
bitfield.clone().into_raw_bytes(),
vec![0b0001_1111, 0b0000_0000]
);
bitfield.set(5, true).unwrap();
assert_eq!(
bitfield.clone().into_raw_bytes(),
vec![0b0011_1111, 0b0000_0000]
);
bitfield.set(6, true).unwrap();
assert_eq!(
bitfield.clone().into_raw_bytes(),
vec![0b0111_1111, 0b0000_0000]
);
bitfield.set(7, true).unwrap();
assert_eq!(
bitfield.clone().into_raw_bytes(),
vec![0b1111_1111, 0b0000_0000]
);
bitfield.set(8, true).unwrap();
assert_eq!(bitfield.into_raw_bytes(), vec![0b1111_1111, 0b0000_0001]);
}
#[test]
fn highest_set_bit() {
assert_eq!(
BitList1024::with_capacity(16).unwrap().highest_set_bit(),
None
);
assert_eq!(
BitList1024::from_raw_bytes(vec![0b0000_0001, 0b0000_0000], 16)
.unwrap()
.highest_set_bit(),
Some(0)
);
assert_eq!(
BitList1024::from_raw_bytes(vec![0b0000_0010, 0b0000_0000], 16)
.unwrap()
.highest_set_bit(),
Some(1)
);
assert_eq!(
BitList1024::from_raw_bytes(vec![0b0000_1000], 8)
.unwrap()
.highest_set_bit(),
Some(3)
);
assert_eq!(
BitList1024::from_raw_bytes(vec![0b0000_0000, 0b1000_0000], 16)
.unwrap()
.highest_set_bit(),
Some(15)
);
}
#[test]
fn intersection() {
let a = BitList1024::from_raw_bytes(vec![0b1100, 0b0001], 16).unwrap();
let b = BitList1024::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap();
let c = BitList1024::from_raw_bytes(vec![0b1000, 0b0001], 16).unwrap();
assert_eq!(a.intersection(&b), c);
assert_eq!(b.intersection(&a), c);
assert_eq!(a.intersection(&c), c);
assert_eq!(b.intersection(&c), c);
assert_eq!(a.intersection(&a), a);
assert_eq!(b.intersection(&b), b);
assert_eq!(c.intersection(&c), c);
}
#[test]
fn intersection_diff_length() {
let a = BitList1024::from_bytes(vec![0b0010_1110, 0b0010_1011]).unwrap();
let b = BitList1024::from_bytes(vec![0b0010_1101, 0b0000_0001]).unwrap();
let c = BitList1024::from_bytes(vec![0b0010_1100, 0b0000_0001]).unwrap();
let d = BitList1024::from_bytes(vec![0b0010_1110, 0b1111_1111, 0b1111_1111]).unwrap();
assert_eq!(a.len(), 13);
assert_eq!(b.len(), 8);
assert_eq!(c.len(), 8);
assert_eq!(d.len(), 23);
assert_eq!(a.intersection(&b), c);
assert_eq!(b.intersection(&a), c);
assert_eq!(a.intersection(&d), a);
assert_eq!(d.intersection(&a), a);
}
#[test]
fn union() {
let a = BitList1024::from_raw_bytes(vec![0b1100, 0b0001], 16).unwrap();
let b = BitList1024::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap();
let c = BitList1024::from_raw_bytes(vec![0b1111, 0b1001], 16).unwrap();
assert_eq!(a.union(&b), c);
assert_eq!(b.union(&a), c);
assert_eq!(a.union(&a), a);
assert_eq!(b.union(&b), b);
assert_eq!(c.union(&c), c);
}
#[test]
fn union_diff_length() {
let a = BitList1024::from_bytes(vec![0b0010_1011, 0b0010_1110]).unwrap();
let b = BitList1024::from_bytes(vec![0b0000_0001, 0b0010_1101]).unwrap();
let c = BitList1024::from_bytes(vec![0b0010_1011, 0b0010_1111]).unwrap();
let d = BitList1024::from_bytes(vec![0b0010_1011, 0b1011_1110, 0b1000_1101]).unwrap();
assert_eq!(a.len(), c.len());
assert_eq!(a.union(&b), c);
assert_eq!(b.union(&a), c);
assert_eq!(a.union(&d), d);
assert_eq!(d.union(&a), d);
}
#[test]
fn difference() {
let a = BitList1024::from_raw_bytes(vec![0b1100, 0b0001], 16).unwrap();
let b = BitList1024::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap();
let a_b = BitList1024::from_raw_bytes(vec![0b0100, 0b0000], 16).unwrap();
let b_a = BitList1024::from_raw_bytes(vec![0b0011, 0b1000], 16).unwrap();
assert_eq!(a.difference(&b), a_b);
assert_eq!(b.difference(&a), b_a);
assert!(a.difference(&a).is_zero());
}
#[test]
fn difference_diff_length() {
let a = BitList1024::from_raw_bytes(vec![0b0110, 0b1100, 0b0011], 24).unwrap();
let b = BitList1024::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap();
let a_b = BitList1024::from_raw_bytes(vec![0b0100, 0b0100, 0b0011], 24).unwrap();
let b_a = BitList1024::from_raw_bytes(vec![0b1001, 0b0001], 16).unwrap();
assert_eq!(a.difference(&b), a_b);
assert_eq!(b.difference(&a), b_a);
}
#[test]
fn shift_up() {
let mut a = BitList1024::from_raw_bytes(vec![0b1100_1111, 0b1101_0110], 16).unwrap();
let mut b = BitList1024::from_raw_bytes(vec![0b1001_1110, 0b1010_1101], 16).unwrap();
a.shift_up(1).unwrap();
assert_eq!(a, b);
a.shift_up(15).unwrap();
assert!(a.is_zero());
b.shift_up(16).unwrap();
assert!(b.is_zero());
assert!(b.shift_up(17).is_err());
}
#[test]
fn num_set_bits() {
let a = BitList1024::from_raw_bytes(vec![0b1100, 0b0001], 16).unwrap();
let b = BitList1024::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap();
assert_eq!(a.num_set_bits(), 3);
assert_eq!(b.num_set_bits(), 5);
}
#[test]
fn iter() {
let mut bitfield = BitList1024::with_capacity(9).unwrap();
bitfield.set(2, true).unwrap();
bitfield.set(8, true).unwrap();
assert_eq!(
bitfield.iter().collect::<Vec<bool>>(),
vec![false, false, true, false, false, false, false, false, true]
);
}
#[test]
fn ssz_bytes_len() {
for i in 1..64 {
let mut bitfield = BitList1024::with_capacity(i).unwrap();
for j in 0..i {
bitfield.set(j, true).expect("should set bit in bounds");
}
let bytes = bitfield.as_ssz_bytes();
assert_eq!(bitfield.ssz_bytes_len(), bytes.len(), "i = {}", i);
}
}
}
| 34.73246 | 117 | 0.57401 |
f71a84a188996ab75a1bd45de6a371442681aed3 | 2,332 | use gfx_glyph::Font;
use amethyst_assets::{Asset, Error, Handle, ProcessingState, ResultExt, SimpleFormat};
use amethyst_core::specs::prelude::VecStorage;
/// A loaded set of fonts from a file.
#[derive(Clone)]
pub struct FontAsset(pub Font<'static>);
/// A handle to font data stored with `amethyst_assets`.
pub type FontHandle = Handle<FontAsset>;
#[derive(Clone)]
pub struct FontData(Font<'static>);
impl Asset for FontAsset {
const NAME: &'static str = "ui::Font";
type Data = FontData;
type HandleStorage = VecStorage<Handle<Self>>;
}
impl Into<Result<ProcessingState<FontAsset>, Error>> for FontData {
fn into(self) -> Result<ProcessingState<FontAsset>, Error> {
Ok(ProcessingState::Loaded(FontAsset(self.0)))
}
}
/// Identical to TtfFormat.
///
/// Loads font files, supports TrueType and **some** OpenType files.
///
/// OpenType is a superset of TrueType, so if your OpenType file uses any features that don't
/// exist in TrueType this will fail. This will only load the first font contained in a file.
/// If this is a problem for you please file an issue with Amethyst on GitHub.
pub type OtfFormat = TtfFormat;
/// Loads font files, supports TrueType and **some** OpenType files.
///
/// OpenType is a superset of TrueType, so if your OpenType file uses any features that don't
/// exist in TrueType this will fail. This will only load the first font contained in a file.
/// If this is a problem for you please file an issue with Amethyst on GitHub.
#[derive(Clone)]
pub struct TtfFormat;
impl SimpleFormat<FontAsset> for TtfFormat {
const NAME: &'static str = "TTF/OTF";
type Options = ();
fn import(&self, bytes: Vec<u8>, _: ()) -> Result<FontData, Error> {
Font::from_bytes(bytes)
.map(FontData)
.chain_err(|| "Font parsing error")
}
}
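// A sketch of how this format is typically used with `amethyst_assets` (the `loader` and the
// `AssetStorage<FontAsset>` here are assumed to be set up elsewhere, e.g. fetched from the
// `World`):
//
// let handle: FontHandle = loader.load("font.ttf", TtfFormat, (), (), &font_storage);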
/// Wrapper format for all core supported Font formats
#[derive(Debug, Clone, Deserialize, Serialize)]
pub enum FontFormat {
/// TTF Format
Ttf,
/// OTF Format
Otf,
}
impl SimpleFormat<FontAsset> for FontFormat {
const NAME: &'static str = "FontFormat";
type Options = ();
fn import(&self, bytes: Vec<u8>, _: ()) -> Result<FontData, Error> {
match *self {
FontFormat::Ttf | FontFormat::Otf => TtfFormat.import(bytes, ()),
}
}
}
| 31.093333 | 94 | 0.677101 |
7273f70f4c55dc46ee775ef960928a8ede410aab | 4,366 | use std::{
env,
error::Error,
fs,
path::{Path, PathBuf},
};
use quote::{format_ident, quote};
fn main() -> Result<(), Box<dyn Error>> {
let out_dir = &PathBuf::from(env::var("OUT_DIR")?);
let pkg_name = env::var("CARGO_PKG_NAME")?;
let target = env::var("TARGET")?;
txt2rust(&out_dir)?;
// place the linker script somewhere the linker can find it
fs::write(out_dir.join("link.x"), fs::read("link.x")?)?;
// place the assembly part of the entry point somewhere the linker can find it
fs::copy(
format!("bin/{}.a", target),
out_dir.join(format!("lib{}.a", pkg_name)),
)?;
println!("cargo:rustc-link-lib=static={}", pkg_name);
println!("cargo:rustc-link-search={}", out_dir.display());
Ok(())
}
struct Interrupt<'a> {
name: &'a str,
irq: u16,
description: &'a str,
}
/// Generate Rust code from `interrupts.txt`
// NOTE `interrupts.txt` was generated by running `pdftotext -layout -f $start -l
// $end` on the reference manual (`$start..$end` includes section 3.2), plus
// some manual edits to remove duplicated names
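// Each line of `interrupts.txt` is expected to look like `<irq> <NAME> <description>`,
// whitespace-separated, e.g. (a hypothetical entry):
//
//     32   DMA0_DMA16   DMA channel 0/16 transfer complete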
fn txt2rust(out_dir: &Path) -> Result<(), Box<dyn Error>> {
const IRQ_START: u16 = 32;
const NIRQS: usize = 128;
let txt = fs::read_to_string("interrupts.txt")?;
let mut entries = vec![];
for line in txt.trim().lines() {
const EOF: &str = "unexpected EOF";
let mut parts = line.splitn(2, char::is_whitespace);
let irq = parts
.next()
.ok_or("expected IRQ number; found EOF")?
.parse()?;
let mut parts = parts
.next()
.ok_or(EOF)?
.trim()
.splitn(2, char::is_whitespace);
let name = parts
.next()
.ok_or("expected the interrupt name; found EOF")?;
let description = parts
.next()
.ok_or("expected the description; found EOF")?
.trim();
entries.push(Interrupt {
name,
irq,
description,
});
}
if entries.len() != NIRQS {
return Err("`interrupts.txt` must have 128 entries".into());
}
// remove all "reserved" entries
let interrupts = entries
.into_iter()
.filter(|entry| entry.name.to_lowercase() != "reserved")
.collect::<Vec<_>>();
// Generate `enum Interrupt`
let mut items = vec![];
let variants = interrupts
.iter()
.map(|interrupt| {
let description = interrupt.description;
let name = format_ident!("{}", interrupt.name);
let irq = interrupt.irq;
quote!(
#[doc = #description]
#name = #irq,
)
})
.collect::<Vec<_>>();
items.push(quote!(
/// List of interrupts
#[allow(non_camel_case_types)]
#[derive(Clone, Copy)]
#[repr(u16)]
pub enum Interrupt {
#(#variants)*
}
impl Interrupt {
/// Returns the interrupt IRQ number
pub fn irq(self) -> u16 {
self as u16
}
}
));
let mut elmts = vec![];
let mut pos = IRQ_START;
for interrupt in &interrupts {
while pos != interrupt.irq {
// add a reserved entry
elmts.push(quote!({
extern "C" {
fn DefaultHandler();
}
DefaultHandler
}));
pos += 1;
}
let name = format_ident!("{}", interrupt.name);
elmts.push(quote!(
{
extern "C" {
fn #name();
}
#name
}
));
pos += 1;
}
// Generate `SPIS` array
items.push(quote!(
static SPIS: [unsafe extern "C" fn(); #NIRQS] = [#(#elmts,)*];
));
let code = quote!(#(#items)*);
fs::write(out_dir.join("interrupts.rs"), code.to_string().into_bytes())?;
// Also generate a linker script that provides a default value for all these interrupts
let mut script = String::new();
for interrupt in interrupts {
script.push_str(&format!("PROVIDE({} = DefaultHandler);\n", interrupt.name));
}
fs::write(out_dir.join("interrupts.x"), script)?;
Ok(())
}
| 26.301205 | 91 | 0.512368 |
d72bb6a41c4d696a3374030394be181fc0991cb7 | 9,105 | //! A high-level UnQLite database engine wrapper.
//!
//! [![travis-badge][]][travis] [![release-badge][]][cargo] [![downloads]][cargo]
//! [![docs-badge][]][docs] [![license-badge][]][cargo]
//!
//! NOTE: Some of the documentation is stolen from [UnQLite Official Website][unqlite].
//!
//! # What is UnQLite?
//!
//! > UnQLite is a software library which implements a *self-contained*, *serverless*,
//! > zero-configuration, transactional NoSQL database engine. UnQLite is a document store
//! > database similar to [MongoDB], [Redis], [CouchDB] etc. as well as a standard Key/Value
//! > store similar to [BerkeleyDB], [LevelDB], etc.
//! >
//! > UnQLite is an embedded NoSQL (Key/Value store and Document-store) database engine. Unlike
//! > most other NoSQL databases, UnQLite does not have a separate server process. UnQLite reads
//! > and writes directly to ordinary disk files. A complete database with multiple collections
//! > is contained in **a single disk file**. The database file format is cross-platform: you
//! > can freely copy a database between 32-bit and 64-bit systems or between big-endian and
//! > little-endian architectures.
//!
//! # Port to Rust
//!
//! This crate is a high-level UnQLite database wrapper for Rust. A low-level bindings wrapper
//! is available as a separate crate: [unqlite-sys](https://crates.io/crates/unqlite-sys).
//!
//! # Usage
//!
//! You can start with `UnQLite` constructors:
//!
//! ```
//! extern crate unqlite;
//!
//! use unqlite::{UnQLite, Config, KV, Cursor};
//!
//! # #[cfg(feature = "enable-threads")]
//! fn main() {
//! // The database memory is not handled by Rust, and the database is on-disk,
//! // so `mut` is not necessary.
//! let unqlite = UnQLite::create_temp();
//! // Use any type that can use as `[u8]`
//! unqlite.kv_store("key", "a long length value").unwrap();
//! unqlite.kv_store("abc", [1,2,3]).unwrap();
//!
//! let mut entry = unqlite.first();
//! // Iterate records
//! loop {
//! if entry.is_none() { break; }
//!
//! let record = entry.expect("valid entry");
//! let (key, value) = record.key_value();
//! println!("* Go through {:?} --> {:?}", key, value);
//!
//! if value.len() > 10 {
//! println!("** Delete key {:?} by value length", key);
//! entry = record.delete();
//! } else {
//! entry = record.next();
//! }
//! }
//! //panic!("for test");
//! }
//! # #[cfg(not(feature = "enable-threads"))]
//! # fn main() { }
//! ```
//!
//! [unqlite]: https://unqlite.org/index.html
//! [travis-badge]: https://img.shields.io/travis/zitsen/unqlite.rs.svg?style=flat-square
//! [travis]: https://travis-ci.org/zitsen/unqlite.rs
//! [release-badge]: https://img.shields.io/crates/v/unqlite.svg?style=flat-square
//! [downloads]: https://img.shields.io/crates/d/unqlite.svg?style=flat-square
//! [cargo]: https://crates.io/crates/unqlite
//! [docs-badge]: https://img.shields.io/badge/API-docs-blue.svg?style=flat-square
//! [docs]: https://zitsen.github.io/unqlite.rs
//! [license-badge]: https://img.shields.io/crates/l/unqlite.svg?style=flat-square
extern crate libc;
#[cfg(test)]
extern crate tempfile;
#[macro_use]
extern crate paste;
pub use error::{Error, Result};
use error::Wrap;
use ffi::{unqlite_close, unqlite_open};
use std::ffi::CString;
use std::ptr::NonNull;
/// UnQLite database entry point.
///
/// UnQLite support both in-memory and on-disk database.
/// There's several constructors:
///
/// Constructor | Meaning
/// --- | ---
/// [`create_in_memory`](#method.create_in_memory) | Create a private, in-memory database.
/// [`create_temp`](#method.create_temp) | Create a private, temporary on-disk database.
/// [`create`](#method.create) | Create if not exists, otherwise, open as read-write.
/// [`open_mmap`](#method.open_mmap) | Obtain a read-only memory view of the whole database.
/// [`open_readonly`](#method.open_readonly) | Open the database in a read-only mode.
///
pub struct UnQLite {
engine: NonNull<::ffi::unqlite>,
}
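// `eval!(open, a, b)` pastes the `unqlite_` prefix onto the identifier and expands to
// `unsafe { unqlite_open(a, b) }`.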
macro_rules! eval {
($i: ident, $($e: expr),*) => (
unsafe {
paste::expr! {
[<unqlite_ $i>]($($e),*)
}
}
);
}
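// `wrap!` performs the FFI call via `eval!` and converts the returned status code into a
// `Result` through the `Wrap` trait; `wrap_raw!` additionally passes the database handle as
// the first argument.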
macro_rules! wrap {
($i: ident, $($e: expr),*) => (eval!($i, $($e),*).wrap());
}
macro_rules! wrap_raw {
($self_:ident, $i: ident) => (
wrap!($i, $self_.as_raw_mut_ptr())
);
($self_:ident, $i: ident, $($e: expr),+) => (
wrap!($i, $self_.as_raw_mut_ptr(), $($e),+)
);
}
impl UnQLite {
/// Create UnQLite database at specific path.
///
/// ```ignore
/// let _ = UnQLite::open("str");
/// let _ = UnQLite::open(String::new());
/// ```
#[inline]
fn open<P: AsRef<str>>(filename: P, mode: OpenMode) -> Result<UnQLite> {
// `unqlite_open` writes the handle on success, so start from a null pointer rather than
// uninitialized memory.
let mut db: *mut ::ffi::unqlite = std::ptr::null_mut();
let filename = filename.as_ref();
let filename = try!(CString::new(filename));
wrap!(open, &mut db, filename.as_ptr(), mode.into()).map(|_| UnQLite {
engine: unsafe { NonNull::new_unchecked(db) },
})
}
/// Create UnQLite database as `filename`.
///
/// By default, the database is created in read-write mode.
///
/// ## Panics
///
/// Will panic if failed in creating.
///
/// ## Example
///
/// ```ignore
/// let _ = UnQLite::create("test.db");
/// ```
///
/// ## C
///
/// ```c
/// unqlite *pDb;
///
/// // on-disk database
/// rc = unqlite_open(&pDb,"test.db",UNQLITE_OPEN_CREATE);
///
/// // in-memory database
/// rc = unqlite_open(&pDb, ":mem:", UNQLITE_OPEN_MEM);
/// ```
#[inline]
pub fn create<P: AsRef<str>>(filename: P) -> UnQLite {
Self::open(filename, OpenMode::Create).unwrap()
}
/// Create database in memory.
///
/// Equivalent to:
///
/// ```ignore
/// let _ = UnQLite::create(":mem:");
/// ```
/// ## Panics
///
/// Will panic if failed in creating.
///
#[inline]
pub fn create_in_memory() -> UnQLite {
Self::create(":mem:")
}
/// A private, temporary on-disk database will be created.
///
/// This private database will be automatically deleted as soon as
/// the database connection is closed.
///
/// ## Panics
///
/// Will panic if failed in creating.
///
/// ## C
///
/// ```c
/// int rc = unqlite_open("test.db", UNQLITE_OPEN_TEMP_DB);
/// ```
#[inline]
pub fn create_temp() -> UnQLite {
Self::open("", OpenMode::TempDB).unwrap()
}
/// Obtain a read-only memory view of the whole database.
///
/// You will get significant performance improvements with this combination but your database
/// is still read-only.
///
/// ## Panics
///
/// Panic if open failed.
///
/// ## C
///
/// ```c
/// unqlite_open(&pDb, "test.db", UNQLITE_OPEN_MMAP | UNQLITE_OPEN_READONLY);
/// ```
#[inline]
pub fn open_mmap<P: AsRef<str>>(filename: P) -> UnQLite {
Self::open(filename, OpenMode::MMap).unwrap()
}
/// Open the database in a read-only mode.
///
/// That is, you cannot perform a store, append, commit or rollback operations with this
/// control flag.
///
/// Always prefer to use `open_mmap` for readonly in disk database.
///
/// ## Panics
///
/// Panic too.
///
/// ## C
/// ```c
/// unqlite_open(&pDb, "test.db", UNQLITE_OPEN_READONLY);
/// ```
#[inline]
pub fn open_readonly<P: AsRef<str>>(filename: P) -> UnQLite {
Self::open(filename, OpenMode::ReadOnly).unwrap()
}
fn close(&self) -> Result<()> {
wrap!(close, self.as_raw_mut_ptr())
}
unsafe fn as_raw_mut_ptr(&self) -> *mut ::ffi::unqlite {
self.engine.as_ptr()
}
}
unsafe impl Send for UnQLite {}
unsafe impl Sync for UnQLite {}
impl Drop for UnQLite {
fn drop(&mut self) {
self.close().unwrap();
}
}
#[allow(dead_code, non_snake_case, non_camel_case_types)]
pub mod ffi;
#[allow(dead_code)]
pub mod vars;
mod error;
mod openmode;
mod config;
mod util;
mod transaction;
mod kv_store;
mod kv_cursor;
pub mod document;
pub use self::config::Config;
pub use self::kv_cursor::*;
pub use self::kv_store::*;
use self::openmode::OpenMode;
pub use self::transaction::Transaction;
pub use self::util::*;
#[cfg(test)]
#[cfg(feature = "enable-threads")]
mod tests_threadsafe {
use super::UnQLite;
#[test]
fn create_temp() {
let _ = UnQLite::create_temp();
}
#[test]
fn create_in_memory() {
let _ = UnQLite::create_in_memory();
}
#[test]
fn from_readonly_memory() {
let _ = UnQLite::open_readonly(":mem:");
}
}
#[cfg(test)]
mod tests {
use super::UnQLite;
#[test]
fn open() {
let _ = UnQLite::create_temp();
let _ = UnQLite::create_in_memory();
let _ = UnQLite::open_readonly(":mem:");
}
}
| 27.929448 | 99 | 0.581549 |
23abe98e457461d480ffc90c865b6c7d8bea2cd3 | 42,973 | use crate::io::{Interest, PollEvented, ReadBuf, Ready};
use crate::net::{to_socket_addrs, ToSocketAddrs};
use std::convert::TryFrom;
use std::fmt;
use std::io;
use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr};
use std::task::{Context, Poll};
cfg_net! {
/// A UDP socket
///
/// UDP is "connectionless", unlike TCP. Meaning, regardless of what address you've bound to, a `UdpSocket`
/// is free to communicate with many different remotes. In tokio there are basically two main ways to use `UdpSocket`:
///
/// * one to many: [`bind`](`UdpSocket::bind`) and use [`send_to`](`UdpSocket::send_to`)
/// and [`recv_from`](`UdpSocket::recv_from`) to communicate with many different addresses
/// * one to one: [`connect`](`UdpSocket::connect`) and associate with a single address, using [`send`](`UdpSocket::send`)
/// and [`recv`](`UdpSocket::recv`) to communicate only with that remote address
///
/// `UdpSocket` can also be used concurrently for `send_to` and `recv_from` in different tasks:
/// all that's required is to wrap the socket in an `Arc<UdpSocket>` and clone a reference for
/// each task.
///
/// # Streams
///
/// If you need to listen over UDP and produce a [`Stream`], you can look
/// at [`UdpFramed`].
///
/// [`UdpFramed`]: https://docs.rs/tokio-util/latest/tokio_util/udp/struct.UdpFramed.html
/// [`Stream`]: https://docs.rs/futures/0.3/futures/stream/trait.Stream.html
///
/// # Example: one to many (bind)
///
/// Using `bind` we can create a simple echo server that sends and recv's with many different clients:
/// ```no_run
/// use tokio::net::UdpSocket;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let sock = UdpSocket::bind("0.0.0.0:8080").await?;
/// let mut buf = [0; 1024];
/// loop {
/// let (len, addr) = sock.recv_from(&mut buf).await?;
/// println!("{:?} bytes received from {:?}", len, addr);
///
/// let len = sock.send_to(&buf[..len], addr).await?;
/// println!("{:?} bytes sent", len);
/// }
/// }
/// ```
///
/// # Example: one to one (connect)
///
/// Or using `connect` we can echo with a single remote address using `send` and `recv`:
/// ```no_run
/// use tokio::net::UdpSocket;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let sock = UdpSocket::bind("0.0.0.0:8080").await?;
///
/// let remote_addr = "127.0.0.1:59611";
/// sock.connect(remote_addr).await?;
/// let mut buf = [0; 1024];
/// loop {
/// let len = sock.recv(&mut buf).await?;
/// println!("{:?} bytes received from {:?}", len, remote_addr);
///
/// let len = sock.send(&buf[..len]).await?;
/// println!("{:?} bytes sent", len);
/// }
/// }
/// ```
///
/// # Example: Sending/Receiving concurrently
///
/// Because `send_to` and `recv_from` take `&self`, it's perfectly alright to wrap the socket
/// in an `Arc<UdpSocket>` and share the references across multiple tasks in order to
/// send/receive concurrently. Here is a similar "echo" example that supports concurrent
/// sending/receiving:
///
/// ```no_run
/// use tokio::{net::UdpSocket, sync::mpsc};
/// use std::{io, net::SocketAddr, sync::Arc};
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let sock = UdpSocket::bind("0.0.0.0:8080".parse::<SocketAddr>().unwrap()).await?;
/// let r = Arc::new(sock);
/// let s = r.clone();
/// let (tx, mut rx) = mpsc::channel::<(Vec<u8>, SocketAddr)>(1_000);
///
/// tokio::spawn(async move {
/// while let Some((bytes, addr)) = rx.recv().await {
/// let len = s.send_to(&bytes, &addr).await.unwrap();
/// println!("{:?} bytes sent", len);
/// }
/// });
///
/// let mut buf = [0; 1024];
/// loop {
/// let (len, addr) = r.recv_from(&mut buf).await?;
/// println!("{:?} bytes received from {:?}", len, addr);
/// tx.send((buf[..len].to_vec(), addr)).await.unwrap();
/// }
/// }
/// ```
///
pub struct UdpSocket {
io: PollEvented<mio::net::UdpSocket>,
}
}
impl UdpSocket {
/// This function will create a new UDP socket and attempt to bind it to
/// the `addr` provided.
///
/// # Example
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let sock = UdpSocket::bind("0.0.0.0:8080").await?;
/// // use `sock`
/// # let _ = sock;
/// Ok(())
/// }
/// ```
pub async fn bind<A: ToSocketAddrs>(addr: A) -> io::Result<UdpSocket> {
let addrs = to_socket_addrs(addr).await?;
let mut last_err = None;
for addr in addrs {
match UdpSocket::bind_addr(addr) {
Ok(socket) => return Ok(socket),
Err(e) => last_err = Some(e),
}
}
Err(last_err.unwrap_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidInput,
"could not resolve to any address",
)
}))
}
fn bind_addr(addr: SocketAddr) -> io::Result<UdpSocket> {
let sys = mio::net::UdpSocket::bind(addr)?;
UdpSocket::new(sys)
}
fn new(socket: mio::net::UdpSocket) -> io::Result<UdpSocket> {
let io = PollEvented::new(socket)?;
Ok(UdpSocket { io })
}
/// Creates new `UdpSocket` from a previously bound `std::net::UdpSocket`.
///
/// This function is intended to be used to wrap a UDP socket from the
/// standard library in the Tokio equivalent. The conversion assumes nothing
/// about the underlying socket; it is left up to the user to set it in
/// non-blocking mode.
///
/// This can be used in conjunction with socket2's `Socket` interface to
/// configure a socket before it's handed off, such as setting options like
/// `reuse_address` or binding to multiple addresses.
///
/// # Panics
///
/// This function panics if thread-local runtime is not set.
///
/// The runtime is usually set implicitly when this function is called
/// from a future driven by a tokio runtime, otherwise runtime can be set
/// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function.
///
/// # Example
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// # use std::{io, net::SocketAddr};
///
/// # #[tokio::main]
/// # async fn main() -> io::Result<()> {
/// let addr = "0.0.0.0:8080".parse::<SocketAddr>().unwrap();
/// let std_sock = std::net::UdpSocket::bind(addr)?;
/// std_sock.set_nonblocking(true)?;
/// let sock = UdpSocket::from_std(std_sock)?;
/// // use `sock`
/// # Ok(())
/// # }
/// ```
pub fn from_std(socket: net::UdpSocket) -> io::Result<UdpSocket> {
let io = mio::net::UdpSocket::from_std(socket);
UdpSocket::new(io)
}
/// Returns the local address that this socket is bound to.
///
/// # Example
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// # use std::{io, net::SocketAddr};
///
/// # #[tokio::main]
/// # async fn main() -> io::Result<()> {
/// let addr = "0.0.0.0:8080".parse::<SocketAddr>().unwrap();
/// let sock = UdpSocket::bind(addr).await?;
/// // the address the socket is bound to
/// let local_addr = sock.local_addr()?;
/// # Ok(())
/// # }
/// ```
pub fn local_addr(&self) -> io::Result<SocketAddr> {
self.io.local_addr()
}
/// Connects the UDP socket, setting the default destination for `send()` and limiting
/// packets that are read via `recv()` to those received from the address specified in
/// `addr`.
///
/// # Example
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// # use std::{io, net::SocketAddr};
///
/// # #[tokio::main]
/// # async fn main() -> io::Result<()> {
/// let sock = UdpSocket::bind("0.0.0.0:8080".parse::<SocketAddr>().unwrap()).await?;
///
/// let remote_addr = "127.0.0.1:59600".parse::<SocketAddr>().unwrap();
/// sock.connect(remote_addr).await?;
/// let mut buf = [0u8; 32];
/// // recv from remote_addr
/// let len = sock.recv(&mut buf).await?;
/// // send to remote_addr
/// let _len = sock.send(&buf[..len]).await?;
/// # Ok(())
/// # }
/// ```
pub async fn connect<A: ToSocketAddrs>(&self, addr: A) -> io::Result<()> {
let addrs = to_socket_addrs(addr).await?;
let mut last_err = None;
for addr in addrs {
match self.io.connect(addr) {
Ok(_) => return Ok(()),
Err(e) => last_err = Some(e),
}
}
Err(last_err.unwrap_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidInput,
"could not resolve to any address",
)
}))
}
/// Wait for any of the requested ready states.
///
/// This function is usually paired with `try_recv()` or `try_send()`. It
/// can be used to concurrently recv / send to the same socket on a single
/// task without splitting the socket.
///
/// The function may complete without the socket being ready. This is a
/// false-positive and attempting an operation will return with
/// `io::ErrorKind::WouldBlock`.
///
/// # Examples
///
/// Concurrently receive from and send to the socket on the same task
/// without splitting.
///
/// ```no_run
/// use tokio::io::{self, Interest};
/// use tokio::net::UdpSocket;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let socket = UdpSocket::bind("127.0.0.1:8080").await?;
/// socket.connect("127.0.0.1:8081").await?;
///
/// loop {
/// let ready = socket.ready(Interest::READABLE | Interest::WRITABLE).await?;
///
/// if ready.is_readable() {
/// // The buffer is **not** included in the async task and will only exist
/// // on the stack.
/// let mut data = [0; 1024];
/// match socket.try_recv(&mut data[..]) {
/// Ok(n) => {
/// println!("received {:?}", &data[..n]);
/// }
/// // False-positive, continue
/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {}
/// Err(e) => {
/// return Err(e);
/// }
/// }
/// }
///
/// if ready.is_writable() {
/// // Write some data
/// match socket.try_send(b"hello world") {
/// Ok(n) => {
/// println!("sent {} bytes", n);
/// }
/// // False-positive, continue
/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {}
/// Err(e) => {
/// return Err(e);
/// }
/// }
/// }
/// }
/// }
/// ```
pub async fn ready(&self, interest: Interest) -> io::Result<Ready> {
let event = self.io.registration().readiness(interest).await?;
Ok(event.ready)
}
/// Wait for the socket to become writable.
///
/// This function is equivalent to `ready(Interest::WRITABLE)` and is
/// usually paired with `try_send()` or `try_send_to()`.
///
/// The function may complete without the socket being writable. This is a
/// false-positive and attempting a `try_send()` will return with
/// `io::ErrorKind::WouldBlock`.
///
/// # Examples
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// // Bind socket
/// let socket = UdpSocket::bind("127.0.0.1:8080").await?;
/// socket.connect("127.0.0.1:8081").await?;
///
/// loop {
/// // Wait for the socket to be writable
/// socket.writable().await?;
///
/// // Try to send data, this may still fail with `WouldBlock`
/// // if the readiness event is a false positive.
/// match socket.try_send(b"hello world") {
/// Ok(n) => {
/// break;
/// }
/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
/// continue;
/// }
/// Err(e) => {
/// return Err(e);
/// }
/// }
/// }
///
/// Ok(())
/// }
/// ```
pub async fn writable(&self) -> io::Result<()> {
self.ready(Interest::WRITABLE).await?;
Ok(())
}
/// Sends data on the socket to the remote address that the socket is
/// connected to.
///
/// The [`connect`] method will connect this socket to a remote address.
/// This method will fail if the socket is not connected.
///
/// [`connect`]: method@Self::connect
///
/// # Return
///
/// On success, the number of bytes sent is returned, otherwise, the
/// encountered error is returned.
///
/// # Examples
///
/// ```no_run
/// use tokio::io;
/// use tokio::net::UdpSocket;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// // Bind socket
/// let socket = UdpSocket::bind("127.0.0.1:8080").await?;
/// socket.connect("127.0.0.1:8081").await?;
///
/// // Send a message
/// socket.send(b"hello world").await?;
///
/// Ok(())
/// }
/// ```
pub async fn send(&self, buf: &[u8]) -> io::Result<usize> {
self.io
.registration()
.async_io(Interest::WRITABLE, || self.io.send(buf))
.await
}
/// Attempts to send data on the socket to the remote address to which it
/// was previously `connect`ed.
///
/// The [`connect`] method will connect this socket to a remote address.
/// This method will fail if the socket is not connected.
///
/// Note that on multiple calls to a `poll_*` method in the send direction,
/// only the `Waker` from the `Context` passed to the most recent call will
/// be scheduled to receive a wakeup.
///
/// # Return value
///
/// The function returns:
///
/// * `Poll::Pending` if the socket is not available to write
/// * `Poll::Ready(Ok(n))` if the data was sent, where `n` is the number of bytes sent
/// * `Poll::Ready(Err(e))` if an error is encountered.
///
/// # Errors
///
/// This function may encounter any standard I/O error except `WouldBlock`.
///
/// [`connect`]: method@Self::connect
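///
/// # Examples
///
/// A minimal sketch of driving `poll_send` by hand with
/// `std::future::poll_fn` (stable since Rust 1.64); the addresses are
/// placeholders:
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// use std::future::poll_fn;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let socket = UdpSocket::bind("127.0.0.1:8080").await?;
/// socket.connect("127.0.0.1:8081").await?;
///
/// let n = poll_fn(|cx| socket.poll_send(cx, b"hello world")).await?;
/// println!("sent {} bytes", n);
///
/// Ok(())
/// }
/// ```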
pub fn poll_send(&self, cx: &mut Context<'_>, buf: &[u8]) -> Poll<io::Result<usize>> {
self.io
.registration()
.poll_write_io(cx, || self.io.send(buf))
}
/// Try to send data on the socket to the remote address to which it is
/// connected.
///
/// When the socket buffer is full, `Err(io::ErrorKind::WouldBlock)` is
/// returned. This function is usually paired with `writable()`.
///
/// # Returns
///
/// If successful, `Ok(n)` is returned, where `n` is the number of bytes
/// sent. If the socket is not ready to send data,
/// `Err(ErrorKind::WouldBlock)` is returned.
///
/// # Examples
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// // Bind a UDP socket
/// let socket = UdpSocket::bind("127.0.0.1:8080").await?;
///
/// // Connect to a peer
/// socket.connect("127.0.0.1:8081").await?;
///
/// loop {
/// // Wait for the socket to be writable
/// socket.writable().await?;
///
/// // Try to send data, this may still fail with `WouldBlock`
/// // if the readiness event is a false positive.
/// match socket.try_send(b"hello world") {
/// Ok(n) => {
/// break;
/// }
/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
/// continue;
/// }
/// Err(e) => {
/// return Err(e);
/// }
/// }
/// }
///
/// Ok(())
/// }
/// ```
pub fn try_send(&self, buf: &[u8]) -> io::Result<usize> {
self.io
.registration()
.try_io(Interest::WRITABLE, || self.io.send(buf))
}
/// Wait for the socket to become readable.
///
/// This function is equivalent to `ready(Interest::READABLE)` and is usually
/// paired with `try_recv()`.
///
/// The function may complete without the socket being readable. This is a
/// false-positive; in that case a subsequent `try_recv()` will return with
/// `io::ErrorKind::WouldBlock`.
///
/// # Examples
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// // Connect to a peer
/// let socket = UdpSocket::bind("127.0.0.1:8080").await?;
/// socket.connect("127.0.0.1:8081").await?;
///
/// loop {
/// // Wait for the socket to be readable
/// socket.readable().await?;
///
/// // The buffer is **not** included in the async task and will
/// // only exist on the stack.
/// let mut buf = [0; 1024];
///
/// // Try to recv data, this may still fail with `WouldBlock`
/// // if the readiness event is a false positive.
/// match socket.try_recv(&mut buf) {
/// Ok(n) => {
/// println!("GOT {:?}", &buf[..n]);
/// break;
/// }
/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
/// continue;
/// }
/// Err(e) => {
/// return Err(e);
/// }
/// }
/// }
///
/// Ok(())
/// }
/// ```
pub async fn readable(&self) -> io::Result<()> {
self.ready(Interest::READABLE).await?;
Ok(())
}
/// Receives a single datagram message on the socket from the remote address
/// to which it is connected. On success, returns the number of bytes read.
///
/// The function must be called with valid byte array `buf` of sufficient
/// size to hold the message bytes. If a message is too long to fit in the
/// supplied buffer, excess bytes may be discarded.
///
/// The [`connect`] method will connect this socket to a remote address.
/// This method will fail if the socket is not connected.
///
/// [`connect`]: method@Self::connect
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// // Bind socket
/// let socket = UdpSocket::bind("127.0.0.1:8080").await?;
/// socket.connect("127.0.0.1:8081").await?;
///
/// let mut buf = vec![0; 10];
/// let n = socket.recv(&mut buf).await?;
///
/// println!("received {} bytes {:?}", n, &buf[..n]);
///
/// Ok(())
/// }
/// ```
pub async fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
self.io
.registration()
.async_io(Interest::READABLE, || self.io.recv(buf))
.await
}
/// Attempts to receive a single datagram message on the socket from the remote
/// address to which it is `connect`ed.
///
/// The [`connect`] method will connect this socket to a remote address. This method
/// resolves to an error if the socket is not connected.
///
/// Note that on multiple calls to a `poll_*` method in the recv direction, only the
/// `Waker` from the `Context` passed to the most recent call will be scheduled to
/// receive a wakeup.
///
/// # Return value
///
/// The function returns:
///
/// * `Poll::Pending` if the socket is not ready to read
/// * `Poll::Ready(Ok(()))` if data was read into `ReadBuf`
/// * `Poll::Ready(Err(e))` if an error is encountered.
///
/// # Errors
///
/// This function may encounter any standard I/O error except `WouldBlock`.
///
/// [`connect`]: method@Self::connect
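///
/// # Examples
///
/// A minimal sketch of polling `poll_recv` manually with
/// `std::future::poll_fn` (stable since Rust 1.64) and a stack-allocated
/// `ReadBuf`; the addresses are placeholders:
///
/// ```no_run
/// use tokio::io::ReadBuf;
/// use tokio::net::UdpSocket;
/// use std::future::poll_fn;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let socket = UdpSocket::bind("127.0.0.1:8080").await?;
/// socket.connect("127.0.0.1:8081").await?;
///
/// let mut data = [0u8; 1024];
/// let mut buf = ReadBuf::new(&mut data);
/// poll_fn(|cx| socket.poll_recv(cx, &mut buf)).await?;
///
/// println!("received {:?}", buf.filled());
/// Ok(())
/// }
/// ```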
pub fn poll_recv(&self, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<io::Result<()>> {
let n = ready!(self.io.registration().poll_read_io(cx, || {
// Safety: will not read the maybe-uninitialized bytes.
let b = unsafe {
&mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit<u8>] as *mut [u8])
};
self.io.recv(b)
}))?;
// Safety: We trust `recv` to have filled up `n` bytes in the buffer.
unsafe {
buf.assume_init(n);
}
buf.advance(n);
Poll::Ready(Ok(()))
}
/// Try to receive a single datagram message on the socket from the remote
/// address to which it is connected. On success, returns the number of
/// bytes read.
///
/// The function must be called with valid byte array `buf` of sufficient size
/// to hold the message bytes. If a message is too long to fit in the
/// supplied buffer, excess bytes may be discarded.
///
/// When there is no pending data, `Err(io::ErrorKind::WouldBlock)` is
/// returned. This function is usually paired with `readable()`.
///
/// # Examples
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// // Connect to a peer
/// let socket = UdpSocket::bind("127.0.0.1:8080").await?;
/// socket.connect("127.0.0.1:8081").await?;
///
/// loop {
/// // Wait for the socket to be readable
/// socket.readable().await?;
///
/// // The buffer is **not** included in the async task and will
/// // only exist on the stack.
/// let mut buf = [0; 1024];
///
/// // Try to recv data, this may still fail with `WouldBlock`
/// // if the readiness event is a false positive.
/// match socket.try_recv(&mut buf) {
/// Ok(n) => {
/// println!("GOT {:?}", &buf[..n]);
/// break;
/// }
/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
/// continue;
/// }
/// Err(e) => {
/// return Err(e);
/// }
/// }
/// }
///
/// Ok(())
/// }
/// ```
pub fn try_recv(&self, buf: &mut [u8]) -> io::Result<usize> {
self.io
.registration()
.try_io(Interest::READABLE, || self.io.recv(buf))
}
/// Sends data on the socket to the given address. On success, returns the
/// number of bytes written.
///
/// Address type can be any implementor of [`ToSocketAddrs`] trait. See its
/// documentation for concrete examples.
///
/// It is possible for `addr` to yield multiple addresses, but `send_to`
/// will only send data to the first address yielded by `addr`.
///
/// This will return an error when the IP version of the local socket does
/// not match that returned from [`ToSocketAddrs`].
///
/// [`ToSocketAddrs`]: crate::net::ToSocketAddrs
///
/// # Example
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let socket = UdpSocket::bind("127.0.0.1:8080").await?;
/// let len = socket.send_to(b"hello world", "127.0.0.1:8081").await?;
///
/// println!("Sent {} bytes", len);
///
/// Ok(())
/// }
/// ```
pub async fn send_to<A: ToSocketAddrs>(&self, buf: &[u8], target: A) -> io::Result<usize> {
let mut addrs = to_socket_addrs(target).await?;
match addrs.next() {
Some(target) => self.send_to_addr(buf, target).await,
None => Err(io::Error::new(
io::ErrorKind::InvalidInput,
"no addresses to send data to",
)),
}
}
/// Attempts to send data on the socket to a given address.
///
/// Note that on multiple calls to a `poll_*` method in the send direction, only the
/// `Waker` from the `Context` passed to the most recent call will be scheduled to
/// receive a wakeup.
///
/// # Return value
///
/// The function returns:
///
/// * `Poll::Pending` if the socket is not ready to write
/// * `Poll::Ready(Ok(n))` if the data was sent, where `n` is the number of bytes sent.
/// * `Poll::Ready(Err(e))` if an error is encountered.
///
/// # Errors
///
/// This function may encounter any standard I/O error except `WouldBlock`.
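///
/// # Examples
///
/// A minimal sketch of driving `poll_send_to` with `std::future::poll_fn`
/// (stable since Rust 1.64); the addresses are placeholders:
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// use std::future::poll_fn;
/// use std::io;
/// use std::net::SocketAddr;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let socket = UdpSocket::bind("127.0.0.1:8080").await?;
/// let target: SocketAddr = "127.0.0.1:8081".parse().unwrap();
///
/// let n = poll_fn(|cx| socket.poll_send_to(cx, b"hello world", target)).await?;
/// println!("sent {} bytes", n);
///
/// Ok(())
/// }
/// ```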
pub fn poll_send_to(
&self,
cx: &mut Context<'_>,
buf: &[u8],
target: SocketAddr,
) -> Poll<io::Result<usize>> {
self.io
.registration()
.poll_write_io(cx, || self.io.send_to(buf, target))
}
/// Try to send data on the socket to the given address, but if the send is
/// blocked this will return right away.
///
/// This function is usually paired with `writable()`.
///
/// # Returns
///
/// If successful, `Ok(n)` is returned, where `n` is the number of bytes sent.
///
/// Users should ensure that when the remote cannot receive, the
/// [`ErrorKind::WouldBlock`] error is properly handled. An error can also occur
/// if the IP version of the socket does not match that of `target`.
///
/// [`ErrorKind::WouldBlock`]: std::io::ErrorKind::WouldBlock
///
/// # Example
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// use std::error::Error;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> Result<(), Box<dyn Error>> {
/// let socket = UdpSocket::bind("127.0.0.1:8080").await?;
///
/// let dst = "127.0.0.1:8081".parse()?;
///
/// loop {
/// socket.writable().await?;
///
/// match socket.try_send_to(&b"hello world"[..], dst) {
/// Ok(sent) => {
/// println!("sent {} bytes", sent);
/// break;
/// }
/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
/// // Writable false positive.
/// continue;
/// }
/// Err(e) => return Err(e.into()),
/// }
/// }
///
/// Ok(())
/// }
/// ```
pub fn try_send_to(&self, buf: &[u8], target: SocketAddr) -> io::Result<usize> {
self.io
.registration()
.try_io(Interest::WRITABLE, || self.io.send_to(buf, target))
}
async fn send_to_addr(&self, buf: &[u8], target: SocketAddr) -> io::Result<usize> {
self.io
.registration()
.async_io(Interest::WRITABLE, || self.io.send_to(buf, target))
.await
}
/// Receives a single datagram message on the socket. On success, returns
/// the number of bytes read and the origin.
///
/// The function must be called with valid byte array `buf` of sufficient
/// size to hold the message bytes. If a message is too long to fit in the
/// supplied buffer, excess bytes may be discarded.
///
/// # Example
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let socket = UdpSocket::bind("127.0.0.1:8080").await?;
///
/// let mut buf = vec![0u8; 32];
/// let (len, addr) = socket.recv_from(&mut buf).await?;
///
/// println!("received {:?} bytes from {:?}", len, addr);
///
/// Ok(())
/// }
/// ```
pub async fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
self.io
.registration()
.async_io(Interest::READABLE, || self.io.recv_from(buf))
.await
}
/// Attempts to receive a single datagram on the socket.
///
/// Note that on multiple calls to a `poll_*` method in the recv direction, only the
/// `Waker` from the `Context` passed to the most recent call will be scheduled to
/// receive a wakeup.
///
/// # Return value
///
/// The function returns:
///
/// * `Poll::Pending` if the socket is not ready to read
/// * `Poll::Ready(Ok(addr))` reads data from `addr` into `ReadBuf` if the socket is ready
/// * `Poll::Ready(Err(e))` if an error is encountered.
///
/// # Errors
///
/// This function may encounter any standard I/O error except `WouldBlock`.
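///
/// # Examples
///
/// A minimal sketch of polling `poll_recv_from` with `std::future::poll_fn`
/// (stable since Rust 1.64); the bind address is a placeholder:
///
/// ```no_run
/// use tokio::io::ReadBuf;
/// use tokio::net::UdpSocket;
/// use std::future::poll_fn;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let socket = UdpSocket::bind("127.0.0.1:8080").await?;
///
/// let mut data = [0u8; 1024];
/// let mut buf = ReadBuf::new(&mut data);
/// let addr = poll_fn(|cx| socket.poll_recv_from(cx, &mut buf)).await?;
///
/// println!("received {:?} from {:?}", buf.filled(), addr);
/// Ok(())
/// }
/// ```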
pub fn poll_recv_from(
&self,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<SocketAddr>> {
let (n, addr) = ready!(self.io.registration().poll_read_io(cx, || {
// Safety: will not read the maybe-uninitialized bytes.
let b = unsafe {
&mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit<u8>] as *mut [u8])
};
self.io.recv_from(b)
}))?;
// Safety: We trust `recv_from` to have filled up `n` bytes in the buffer.
unsafe {
buf.assume_init(n);
}
buf.advance(n);
Poll::Ready(Ok(addr))
}
/// Try to receive a single datagram message on the socket. On success,
/// returns the number of bytes read and the origin.
///
/// The function must be called with valid byte array `buf` of sufficient size
/// to hold the message bytes. If a message is too long to fit in the
/// supplied buffer, excess bytes may be discarded.
///
/// When there is no pending data, `Err(io::ErrorKind::WouldBlock)` is
/// returned. This function is usually paired with `readable()`.
///
/// # Examples
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// // Connect to a peer
/// let socket = UdpSocket::bind("127.0.0.1:8080").await?;
/// socket.connect("127.0.0.1:8081").await?;
///
/// loop {
/// // Wait for the socket to be readable
/// socket.readable().await?;
///
/// // The buffer is **not** included in the async task and will
/// // only exist on the stack.
/// let mut buf = [0; 1024];
///
/// // Try to recv data, this may still fail with `WouldBlock`
/// // if the readiness event is a false positive.
/// match socket.try_recv_from(&mut buf) {
/// Ok((n, _addr)) => {
/// println!("GOT {:?}", &buf[..n]);
/// break;
/// }
/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
/// continue;
/// }
/// Err(e) => {
/// return Err(e);
/// }
/// }
/// }
///
/// Ok(())
/// }
/// ```
pub fn try_recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
self.io
.registration()
.try_io(Interest::READABLE, || self.io.recv_from(buf))
}
/// Receives data from the socket, without removing it from the input queue.
/// On success, returns the number of bytes read and the address from which
/// the data came.
///
/// # Notes
///
/// On Windows, if the data is larger than the buffer specified, the buffer
/// is filled with the first part of the data, and `peek_from` returns the
/// error `WSAEMSGSIZE` (10040). The excess data is lost.
/// Make sure to always use a sufficiently large buffer to hold the
/// maximum UDP packet size, which can be up to 65536 bytes in size.
///
/// # Examples
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let socket = UdpSocket::bind("127.0.0.1:8080").await?;
///
/// let mut buf = vec![0u8; 32];
/// let (len, addr) = socket.peek_from(&mut buf).await?;
///
/// println!("peeked {:?} bytes from {:?}", len, addr);
///
/// Ok(())
/// }
/// ```
pub async fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
self.io
.registration()
.async_io(Interest::READABLE, || self.io.peek_from(buf))
.await
}
/// Receives data from the socket, without removing it from the input queue.
/// On success, returns the number of bytes read.
///
/// # Notes
///
/// Note that on multiple calls to a `poll_*` method in the recv direction, only the
/// `Waker` from the `Context` passed to the most recent call will be scheduled to
/// receive a wakeup.
///
/// On Windows, if the data is larger than the buffer specified, the buffer
/// is filled with the first part of the data, and `peek` returns the error
/// `WSAEMSGSIZE` (10040). The excess data is lost.
/// Make sure to always use a sufficiently large buffer to hold the
/// maximum UDP packet size, which can be up to 65536 bytes in size.
///
/// # Return value
///
/// The function returns:
///
/// * `Poll::Pending` if the socket is not ready to read
/// * `Poll::Ready(Ok(addr))` reads data from `addr` into `ReadBuf` if the socket is ready
/// * `Poll::Ready(Err(e))` if an error is encountered.
///
/// # Errors
///
/// This function may encounter any standard I/O error except `WouldBlock`.
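///
/// # Examples
///
/// A minimal sketch of polling `poll_peek_from` with `std::future::poll_fn`
/// (stable since Rust 1.64); the bind address is a placeholder:
///
/// ```no_run
/// use tokio::io::ReadBuf;
/// use tokio::net::UdpSocket;
/// use std::future::poll_fn;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let socket = UdpSocket::bind("127.0.0.1:8080").await?;
///
/// let mut data = [0u8; 1024];
/// let mut buf = ReadBuf::new(&mut data);
/// let addr = poll_fn(|cx| socket.poll_peek_from(cx, &mut buf)).await?;
///
/// println!("peeked {:?} from {:?}", buf.filled(), addr);
/// Ok(())
/// }
/// ```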
pub fn poll_peek_from(
&self,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<SocketAddr>> {
let (n, addr) = ready!(self.io.registration().poll_read_io(cx, || {
// Safety: will not read the maybe-uninitialized bytes.
let b = unsafe {
&mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit<u8>] as *mut [u8])
};
self.io.peek_from(b)
}))?;
// Safety: We trust `peek_from` to have filled up `n` bytes in the buffer.
unsafe {
buf.assume_init(n);
}
buf.advance(n);
Poll::Ready(Ok(addr))
}
/// Gets the value of the `SO_BROADCAST` option for this socket.
///
/// For more information about this option, see [`set_broadcast`].
///
/// [`set_broadcast`]: method@Self::set_broadcast
pub fn broadcast(&self) -> io::Result<bool> {
self.io.broadcast()
}
/// Sets the value of the `SO_BROADCAST` option for this socket.
///
/// When enabled, this socket is allowed to send packets to a broadcast
/// address.
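///
/// # Examples
///
/// A small sketch; the bind address is a placeholder:
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// # use std::io;
///
/// # async fn dox() -> io::Result<()> {
/// let socket = UdpSocket::bind("0.0.0.0:0").await?;
/// socket.set_broadcast(true)?;
/// assert!(socket.broadcast()?);
/// # Ok(())
/// # }
/// ```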
pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
self.io.set_broadcast(on)
}
/// Gets the value of the `IP_MULTICAST_LOOP` option for this socket.
///
/// For more information about this option, see [`set_multicast_loop_v4`].
///
/// [`set_multicast_loop_v4`]: method@Self::set_multicast_loop_v4
pub fn multicast_loop_v4(&self) -> io::Result<bool> {
self.io.multicast_loop_v4()
}
/// Sets the value of the `IP_MULTICAST_LOOP` option for this socket.
///
/// If enabled, multicast packets will be looped back to the local socket.
///
/// # Note
///
/// This may not have any effect on IPv6 sockets.
pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> {
self.io.set_multicast_loop_v4(on)
}
/// Gets the value of the `IP_MULTICAST_TTL` option for this socket.
///
/// For more information about this option, see [`set_multicast_ttl_v4`].
///
/// [`set_multicast_ttl_v4`]: method@Self::set_multicast_ttl_v4
pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
self.io.multicast_ttl_v4()
}
/// Sets the value of the `IP_MULTICAST_TTL` option for this socket.
///
/// Indicates the time-to-live value of outgoing multicast packets for
/// this socket. The default value is 1 which means that multicast packets
/// don't leave the local network unless explicitly requested.
///
/// # Note
///
/// This may not have any effect on IPv6 sockets.
pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
self.io.set_multicast_ttl_v4(ttl)
}
/// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
///
/// For more information about this option, see [`set_multicast_loop_v6`].
///
/// [`set_multicast_loop_v6`]: method@Self::set_multicast_loop_v6
pub fn multicast_loop_v6(&self) -> io::Result<bool> {
self.io.multicast_loop_v6()
}
/// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
///
/// Controls whether this socket sees the multicast packets it sends itself.
///
/// # Note
///
/// This may not have any effect on IPv4 sockets.
pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> {
self.io.set_multicast_loop_v6(on)
}
/// Gets the value of the `IP_TTL` option for this socket.
///
/// For more information about this option, see [`set_ttl`].
///
/// [`set_ttl`]: method@Self::set_ttl
///
/// # Examples
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// # use std::io;
///
/// # async fn dox() -> io::Result<()> {
/// let sock = UdpSocket::bind("127.0.0.1:8080").await?;
///
/// println!("{:?}", sock.ttl()?);
/// # Ok(())
/// # }
/// ```
pub fn ttl(&self) -> io::Result<u32> {
self.io.ttl()
}
/// Sets the value for the `IP_TTL` option on this socket.
///
/// This value sets the time-to-live field that is used in every packet sent
/// from this socket.
///
/// # Examples
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// # use std::io;
///
/// # async fn dox() -> io::Result<()> {
/// let sock = UdpSocket::bind("127.0.0.1:8080").await?;
/// sock.set_ttl(60)?;
///
/// # Ok(())
/// # }
/// ```
pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
self.io.set_ttl(ttl)
}
/// Executes an operation of the `IP_ADD_MEMBERSHIP` type.
///
/// This function specifies a new multicast group for this socket to join.
/// The address must be a valid multicast address, and `interface` is the
/// address of the local interface with which the system should join the
/// multicast group. If it's equal to `INADDR_ANY` then an appropriate
/// interface is chosen by the system.
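///
/// # Examples
///
/// A small sketch; the group address is a placeholder and
/// `Ipv4Addr::UNSPECIFIED` corresponds to `INADDR_ANY`:
///
/// ```no_run
/// use tokio::net::UdpSocket;
/// use std::net::Ipv4Addr;
/// # use std::io;
///
/// # async fn dox() -> io::Result<()> {
/// let socket = UdpSocket::bind("0.0.0.0:0").await?;
/// socket.join_multicast_v4(Ipv4Addr::new(224, 0, 0, 123), Ipv4Addr::UNSPECIFIED)?;
/// # Ok(())
/// # }
/// ```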
pub fn join_multicast_v4(&self, multiaddr: Ipv4Addr, interface: Ipv4Addr) -> io::Result<()> {
self.io.join_multicast_v4(&multiaddr, &interface)
}
/// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type.
///
/// This function specifies a new multicast group for this socket to join.
/// The address must be a valid multicast address, and `interface` is the
/// index of the interface to join/leave (or 0 to indicate any interface).
pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
self.io.join_multicast_v6(multiaddr, interface)
}
/// Executes an operation of the `IP_DROP_MEMBERSHIP` type.
///
/// For more information about this option, see [`join_multicast_v4`].
///
/// [`join_multicast_v4`]: method@Self::join_multicast_v4
pub fn leave_multicast_v4(&self, multiaddr: Ipv4Addr, interface: Ipv4Addr) -> io::Result<()> {
self.io.leave_multicast_v4(&multiaddr, &interface)
}
/// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type.
///
/// For more information about this option, see [`join_multicast_v6`].
///
/// [`join_multicast_v6`]: method@Self::join_multicast_v6
pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
self.io.leave_multicast_v6(multiaddr, interface)
}
/// Returns the value of the `SO_ERROR` option.
///
/// # Examples
/// ```
/// use tokio::net::UdpSocket;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// // Create a socket
/// let socket = UdpSocket::bind("0.0.0.0:8080").await?;
///
/// if let Ok(Some(err)) = socket.take_error() {
/// println!("Got error: {:?}", err);
/// }
///
/// Ok(())
/// }
/// ```
pub fn take_error(&self) -> io::Result<Option<io::Error>> {
self.io.take_error()
}
}
impl TryFrom<std::net::UdpSocket> for UdpSocket {
type Error = io::Error;
/// Consumes the stream, returning the tokio I/O object.
///
/// This is equivalent to
/// [`UdpSocket::from_std(stream)`](UdpSocket::from_std).
fn try_from(stream: std::net::UdpSocket) -> Result<Self, Self::Error> {
Self::from_std(stream)
}
}
impl fmt::Debug for UdpSocket {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.io.fmt(f)
}
}
#[cfg(unix)]
mod sys {
use super::UdpSocket;
use std::os::unix::prelude::*;
impl AsRawFd for UdpSocket {
fn as_raw_fd(&self) -> RawFd {
self.io.as_raw_fd()
}
}
}
#[cfg(windows)]
mod sys {
use super::UdpSocket;
use std::os::windows::prelude::*;
impl AsRawSocket for UdpSocket {
fn as_raw_socket(&self) -> RawSocket {
self.io.as_raw_socket()
}
}
}
| 34.26874 | 126 | 0.518814 |
abc860a122b77e48d03ba7bef8262cb2283c4bef | 216 | table! {
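// Diesel schema for the `file_storage` table; definitions like this are
// typically generated by `diesel print-schema`.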
file_storage (id) {
id -> Integer,
file_name -> Text,
storage_name -> Text,
created_at -> Timestamp,
modified_at -> Timestamp,
is_deleted -> Bool,
}
}
| 19.636364 | 33 | 0.509259 |
eb9e8aa5ef2a38dd3be48e28fc254be3cb6c3a7b | 33,705 | use crate::{SMBiosStruct, UndefinedStruct};
use serde::{ser::SerializeStruct, Serialize, Serializer};
use std::fmt;
use std::ops::Deref;
/// # BIOS Information (Type 0)
pub struct SMBiosInformation<'a> {
parts: &'a UndefinedStruct,
}
impl<'a> SMBiosStruct<'a> for SMBiosInformation<'a> {
const STRUCT_TYPE: u8 = 0u8;
fn new(parts: &'a UndefinedStruct) -> Self {
Self { parts }
}
fn parts(&self) -> &'a UndefinedStruct {
self.parts
}
}
impl<'a> SMBiosInformation<'a> {
/// BIOS vendor's name
pub fn vendor(&self) -> Option<String> {
self.parts.get_field_string(0x4)
}
/// BIOS version
///
/// This value is a free-form string that may contain
/// Core and OEM version information.
pub fn version(&self) -> Option<String> {
self.parts.get_field_string(0x5)
}
/// BIOS starting address segment
///
/// Segment location of BIOS starting address
/// (for example, 0E800h).
///
/// NOTE: The size of the runtime BIOS image can
/// be computed by subtracting the Starting
/// Address Segment from 10000h and
/// multiplying the result by 16.
pub fn starting_address_segment(&self) -> Option<u16> {
self.parts.get_field_word(0x6)
}
/// BIOS release date
///
/// The date string, if supplied, is in either
/// mm/dd/yy or mm/dd/yyyy format. If the year
/// portion of the string is two digits, the year is
/// assumed to be 19yy.
///
/// NOTE: The mm/dd/yyyy format is required for
/// SMBIOS version 2.3 and later.
pub fn release_date(&self) -> Option<String> {
self.parts.get_field_string(0x8)
}
/// BIOS ROM size
///
/// Size (n) where 64K * (n+1) is the size of the
/// physical device containing the BIOS, in
/// bytes.
///
/// FFh - size is 16MB or greater, see Extended
/// BIOS ROM Size for actual size
pub fn rom_size(&self) -> Option<u8> {
self.parts.get_field_byte(0x9)
}
/// BIOS characteristics
///
/// Defines which functions the BIOS supports:
/// PCI, PCMCIA, Flash, etc
pub fn characteristics(&self) -> Option<BiosCharacteristics> {
self.parts
.get_field_dword(0xA)
.map(|raw| BiosCharacteristics::from(raw))
}
/// BIOS vendor reserved characteristics
pub fn bios_vendor_reserved_characteristics(&self) -> Option<u16> {
self.parts.get_field_word(0xE)
}
/// System vendor reserved characteristics
pub fn system_vendor_reserved_characteristics(&self) -> Option<u16> {
self.parts.get_field_word(0x10)
}
/// Characteristics extension byte 0
pub fn characteristics_extension0(&self) -> Option<BiosCharacteristicsExtension0> {
self.parts
.get_field_byte(0x12)
.map(|raw| BiosCharacteristicsExtension0::from(raw))
}
/// Characteristics extension byte 1
pub fn characteristics_extension1(&self) -> Option<BiosCharacteristicsExtension1> {
self.parts
.get_field_byte(0x13)
.map(|raw| BiosCharacteristicsExtension1::from(raw))
}
/// System BIOS major release
///
/// Identifies the major release of the System
/// BIOS; for example, the value is 0Ah for
/// revision 10.22 and 02h for revision 2.1.
///
/// This field or the System BIOS Minor
/// Release field or both are updated each time
/// a System BIOS update for a given system is
/// released.
///
/// If the system does not support the use of
/// this field, the value is 0FFh for both this field
/// and the System BIOS Minor Release field.
pub fn system_bios_major_release(&self) -> Option<u8> {
self.parts.get_field_byte(0x14)
}
/// System BIOS minor release
///
/// Identifies the minor release of the System
/// BIOS; for example, the value is 16h for
/// revision 10.22 and 01h for revision 2.1.
pub fn system_bios_minor_release(&self) -> Option<u8> {
self.parts.get_field_byte(0x15)
}
/// Embedded controller firmware major release
///
/// Identifies the major release of the
/// embedded controller firmware; for example,
/// the value would be 0Ah for revision 10.22
/// and 02h for revision 2.1.
///
/// This field or the Embedded Controller
/// Firmware Minor Release field or both are
/// updated each time an embedded controller
/// firmware update for a given system is
/// released.
///
/// If the system does not have field
/// upgradeable embedded controller firmware,
/// the value is 0FFh.
pub fn e_c_firmware_major_release(&self) -> Option<u8> {
self.parts.get_field_byte(0x16)
}
/// Embedded controller firmware minor release
///
/// Identifies the minor release of the
/// embedded controller firmware; for example,
/// the value is 16h for revision 10.22 and 01h
/// for revision 2.1.
/// If the system does not have field
/// upgradeable embedded controller firmware,
/// the value is 0FFh.
pub fn e_c_firmware_minor_release(&self) -> Option<u8> {
self.parts.get_field_byte(0x17)
}
/// Extended BIOS ROM size
///
/// Extended size of the physical device(s)
/// containing the BIOS, rounded up if needed.
///
/// Bits 15:14 Unit
/// 00b - megabytes
/// 01b - gigabytes
/// 10b - reserved
/// 11b - reserved
/// Bits 13:0 Size
///
/// Examples: a 16 MB device would be
/// represented as 0010h. A 48 GB device set
/// would be represented as
/// 0100_0000_0011_0000b or 4030h.
pub fn extended_rom_size(&self) -> Option<ExtendedRomSize> {
self.parts
.get_field_word(0x18)
.map(|raw| ExtendedRomSize::from(raw))
}
}
/// # Extended BIOS ROM size
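///
/// A sketch of decoding a raw word (the `smbioslib` import path is an
/// assumption; adjust to this crate's actual layout):
///
/// ```ignore
/// use smbioslib::ExtendedRomSize;
///
/// // 0x4030: unit bits 15:14 = 01b (gigabytes), size bits 13:0 = 0x30 = 48
/// assert_eq!(ExtendedRomSize::from(0x4030u16), ExtendedRomSize::Gigabytes(48));
/// ```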
#[derive(Serialize, Debug, PartialEq, Eq)]
pub enum ExtendedRomSize {
/// Extended size of the physical device(s)
/// containing the BIOS (in MB).
Megabytes(u16),
/// Extended size of the physical device(s)
/// containing the BIOS (in GB).
Gigabytes(u16),
/// Extended size of the physical device(s)
/// containing the BIOS in raw form.
///
/// The standard currently only defines MB and GB
/// as given in the top two bits (bits 15:14)
Undefined(u16),
}
impl From<u16> for ExtendedRomSize {
fn from(raw: u16) -> Self {
// Bits 15:14 Unit
// 00b - megabytes
// 01b - gigabytes
// 10b - reserved
// 11b - reserved
// Bits 13:0 Size
let unit = raw & 0b11000000_00000000; // 15:14 mask
let size = raw & 0b00111111_11111111; // 13:0 mask
if unit == 0b00000000_00000000 {
ExtendedRomSize::Megabytes(size)
} else if unit == 0b01000000_00000000 {
ExtendedRomSize::Gigabytes(size)
} else {
ExtendedRomSize::Undefined(raw)
}
}
}
impl fmt::Debug for SMBiosInformation<'_> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct(std::any::type_name::<SMBiosInformation<'_>>())
.field("header", &self.parts.header)
.field("vendor", &self.vendor())
.field("version", &self.version())
.field("starting_address_segment", &self.starting_address_segment())
.field("release_date", &self.release_date())
.field("rom_size", &self.rom_size())
.field("characteristics", &self.characteristics())
.field(
"bios_vendor_reserved_characteristics",
&self.bios_vendor_reserved_characteristics(),
)
.field(
"system_vendor_reserved_characteristics",
&self.system_vendor_reserved_characteristics(),
)
.field(
"characteristics_extension0",
&self.characteristics_extension0(),
)
.field(
"characteristics_extension1",
&self.characteristics_extension1(),
)
.field(
"system_bios_major_release",
&self.system_bios_major_release(),
)
.field(
"system_bios_minor_release",
&self.system_bios_minor_release(),
)
.field(
"e_c_firmware_major_release",
&self.e_c_firmware_major_release(),
)
.field(
"e_c_firmware_minor_release",
&self.e_c_firmware_minor_release(),
)
.field("extended_rom_size", &self.extended_rom_size())
.finish()
}
}
impl Serialize for SMBiosInformation<'_> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut state = serializer.serialize_struct("SMBiosInformation", 16)?;
state.serialize_field("header", &self.parts.header)?;
state.serialize_field("vendor", &self.vendor())?;
state.serialize_field("version", &self.version())?;
state.serialize_field("starting_address_segment", &self.starting_address_segment())?;
state.serialize_field("release_date", &self.release_date())?;
state.serialize_field("rom_size", &self.rom_size())?;
state.serialize_field("characteristics", &self.characteristics())?;
state.serialize_field(
"bios_vendor_reserved_characteristics",
&self.bios_vendor_reserved_characteristics(),
)?;
state.serialize_field(
"system_vendor_reserved_characteristics",
&self.system_vendor_reserved_characteristics(),
)?;
state.serialize_field(
"characteristics_extension0",
&self.characteristics_extension0(),
)?;
state.serialize_field(
"characteristics_extension1",
&self.characteristics_extension1(),
)?;
state.serialize_field(
"system_bios_major_release",
&self.system_bios_major_release(),
)?;
state.serialize_field(
"system_bios_minor_release",
&self.system_bios_minor_release(),
)?;
state.serialize_field(
"e_c_firmware_major_release",
&self.e_c_firmware_major_release(),
)?;
state.serialize_field(
"e_c_firmware_minor_release",
&self.e_c_firmware_minor_release(),
)?;
state.serialize_field("extended_rom_size", &self.extended_rom_size())?;
state.end()
}
}
/// # BIOS Characteristics
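///
/// Each accessor tests a single bit of the raw doubleword; a sketch (the
/// `smbioslib` import path is an assumption):
///
/// ```ignore
/// use smbioslib::BiosCharacteristics;
///
/// // 0x00000080 sets only bit 7, "PCI is supported".
/// let characteristics = BiosCharacteristics::from(0x00000080u32);
/// assert!(characteristics.pci_supported());
/// ```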
#[derive(PartialEq, Eq)]
pub struct BiosCharacteristics {
/// Raw value
pub raw: u32,
}
impl Deref for BiosCharacteristics {
type Target = u32;
fn deref(&self) -> &Self::Target {
&self.raw
}
}
impl From<u32> for BiosCharacteristics {
fn from(raw: u32) -> Self {
BiosCharacteristics { raw }
}
}
impl BiosCharacteristics {
/// Unknown.
pub fn unknown(&self) -> bool {
self.raw & 0x00000004 == 0x00000004
}
/// BIOS Characteristics are not supported.
pub fn bios_characteristics_not_supported(&self) -> bool {
self.raw & 0x00000008 == 0x00000008
}
/// ISA is supported.
pub fn isa_supported(&self) -> bool {
self.raw & 0x00000010 == 0x00000010
}
/// MCA is supported.
pub fn mca_supported(&self) -> bool {
self.raw & 0x00000020 == 0x00000020
}
/// EISA is supported.
pub fn eisa_supported(&self) -> bool {
self.raw & 0x00000040 == 0x00000040
}
/// PCI is supported.
pub fn pci_supported(&self) -> bool {
self.raw & 0x00000080 == 0x00000080
}
/// PC card (PCMCIA) is supported.
pub fn pcmcia_supported(&self) -> bool {
self.raw & 0x00000100 == 0x00000100
}
/// Plug and Play is supported.
pub fn plug_and_play_supported(&self) -> bool {
self.raw & 0x00000200 == 0x00000200
}
/// APM is supported.
pub fn apm_supported(&self) -> bool {
self.raw & 0x00000400 == 0x00000400
}
/// BIOS is upgradeable (Flash).
pub fn bios_upgradeable(&self) -> bool {
self.raw & 0x00000800 == 0x00000800
}
/// BIOS shadowing is allowed.
pub fn bios_shadowing_allowed(&self) -> bool {
self.raw & 0x00001000 == 0x00001000
}
/// VL-VESA is supported.
pub fn vlvesa_supported(&self) -> bool {
self.raw & 0x00002000 == 0x00002000
}
/// ESCD support is available.
pub fn escd_support_available(&self) -> bool {
self.raw & 0x00004000 == 0x00004000
}
/// Boot from CD is supported.
pub fn boot_from_cdsupported(&self) -> bool {
self.raw & 0x00008000 == 0x00008000
}
/// Selectable boot is supported.
pub fn selectable_boot_supported(&self) -> bool {
self.raw & 0x00010000 == 0x00010000
}
/// BIOS ROM is socketed (e.g. PLCC or SOP socket).
pub fn bios_rom_socketed(&self) -> bool {
self.raw & 0x00020000 == 0x00020000
}
/// Boot from PC card (PCMCIA) is supported.
pub fn boot_from_pcmcia_supported(&self) -> bool {
self.raw & 0x00040000 == 0x00040000
}
/// EDD specification is supported.
pub fn edd_specification_supported(&self) -> bool {
self.raw & 0x00080000 == 0x00080000
}
/// Int 13h — Japanese floppy for NEC 9800 1.2 MB (3.5”, 1K bytes/sector, 360 RPM) is supported.
pub fn floppy_nec_japanese_supported(&self) -> bool {
self.raw & 0x00100000 == 0x00100000
}
/// Int 13h — Japanese floppy for Toshiba 1.2 MB (3.5”, 360 RPM) is supported.
pub fn floppy_toshiba_japanese_supported(&self) -> bool {
self.raw & 0x00200000 == 0x00200000
}
/// Int 13h — 5.25” / 360 KB floppy services are supported.
pub fn floppy_525_360_supported(&self) -> bool {
self.raw & 0x00400000 == 0x00400000
}
/// Int 13h — 5.25” /1.2 MB floppy services are supported.
pub fn floppy_525_12_supported(&self) -> bool {
self.raw & 0x00800000 == 0x00800000
}
/// Int 13h — 3.5” / 720 KB floppy services are supported.
pub fn floppy_35_720_supported(&self) -> bool {
self.raw & 0x01000000 == 0x01000000
}
/// Int 13h — 3.5” / 2.88 MB floppy services are supported.
pub fn floppy_35_288_supported(&self) -> bool {
self.raw & 0x02000000 == 0x02000000
}
/// Int 5h, print screen service is supported.
pub fn print_screen_service_supported(&self) -> bool {
self.raw & 0x04000000 == 0x04000000
}
/// Int 9h, 8042 keyboard services are supported.
pub fn keyboard_8042services_supported(&self) -> bool {
self.raw & 0x08000000 == 0x08000000
}
/// Int 14h, serial services are supported.
pub fn serial_services_supported(&self) -> bool {
self.raw & 0x10000000 == 0x10000000
}
/// Int 17h, printer services are supported.
pub fn printer_services_supported(&self) -> bool {
self.raw & 0x20000000 == 0x20000000
}
/// Int 10h, CGA/Mono Video Services are supported.
pub fn cga_mono_video_services_supported(&self) -> bool {
self.raw & 0x40000000 == 0x40000000
}
/// NEC PC-98.
pub fn nec_pc_98supported(&self) -> bool {
self.raw & 0x80000000 == 0x80000000
}
}
impl fmt::Debug for BiosCharacteristics {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct(std::any::type_name::<BiosCharacteristics>())
.field("raw", &self.raw)
.field("unknown", &self.unknown())
.field(
"bios_characteristics_not_supported",
&self.bios_characteristics_not_supported(),
)
.field("isa_supported", &self.isa_supported())
.field("mca_supported", &self.mca_supported())
.field("eisa_supported", &self.eisa_supported())
.field("pci_supported", &self.pci_supported())
.field("pcmcia_supported", &self.pcmcia_supported())
.field("plug_and_play_supported", &self.plug_and_play_supported())
.field("apm_supported", &self.apm_supported())
.field("bios_upgradeable", &self.bios_upgradeable())
.field("bios_shadowing_allowed", &self.bios_shadowing_allowed())
.field("vlvesa_supported", &self.vlvesa_supported())
.field("escd_support_available", &self.escd_support_available())
.field("boot_from_cdsupported", &self.boot_from_cdsupported())
.field(
"selectable_boot_supported",
&self.selectable_boot_supported(),
)
.field("bios_rom_socketed", &self.bios_rom_socketed())
.field(
"boot_from_pcmcia_supported",
&self.boot_from_pcmcia_supported(),
)
.field(
"edd_specification_supported",
&self.edd_specification_supported(),
)
.field(
"floppy_nec_japanese_supported",
&self.floppy_nec_japanese_supported(),
)
.field(
"floppy_toshiba_japanese_supported",
&self.floppy_toshiba_japanese_supported(),
)
.field("floppy_525_360_supported", &self.floppy_525_360_supported())
.field("floppy_525_12_supported", &self.floppy_525_12_supported())
.field("floppy_35_720_supported", &self.floppy_35_720_supported())
.field("floppy_35_288_supported", &self.floppy_35_288_supported())
.field(
"print_screen_service_supported",
&self.print_screen_service_supported(),
)
.field(
"keyboard_8042services_supported",
&self.keyboard_8042services_supported(),
)
.field(
"serial_services_supported",
&self.serial_services_supported(),
)
.field(
"printer_services_supported",
&self.printer_services_supported(),
)
.field(
"cga_mono_video_services_supported",
&self.cga_mono_video_services_supported(),
)
.field("nec_pc_98supported", &self.nec_pc_98supported())
.finish()
}
}
impl Serialize for BiosCharacteristics {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut state = serializer.serialize_struct("BiosCharacteristics", 31)?;
state.serialize_field("raw", &self.raw)?;
state.serialize_field("unknown", &self.unknown())?;
state.serialize_field(
"bios_characteristics_not_supported",
&self.bios_characteristics_not_supported(),
)?;
state.serialize_field("isa_supported", &self.isa_supported())?;
state.serialize_field("mca_supported", &self.mca_supported())?;
state.serialize_field("eisa_supported", &self.eisa_supported())?;
state.serialize_field("pci_supported", &self.pci_supported())?;
state.serialize_field("pcmcia_supported", &self.pcmcia_supported())?;
state.serialize_field("plug_and_play_supported", &self.plug_and_play_supported())?;
state.serialize_field("apm_supported", &self.apm_supported())?;
state.serialize_field("bios_upgradeable", &self.bios_upgradeable())?;
state.serialize_field("bios_shadowing_allowed", &self.bios_shadowing_allowed())?;
state.serialize_field("vlvesa_supported", &self.vlvesa_supported())?;
state.serialize_field("escd_support_available", &self.escd_support_available())?;
state.serialize_field("boot_from_cdsupported", &self.boot_from_cdsupported())?;
state.serialize_field(
"selectable_boot_supported",
&self.selectable_boot_supported(),
)?;
state.serialize_field("bios_rom_socketed", &self.bios_rom_socketed())?;
state.serialize_field(
"boot_from_pcmcia_supported",
&self.boot_from_pcmcia_supported(),
)?;
state.serialize_field(
"edd_specification_supported",
&self.edd_specification_supported(),
)?;
state.serialize_field(
"floppy_nec_japanese_supported",
&self.floppy_nec_japanese_supported(),
)?;
state.serialize_field(
"floppy_toshiba_japanese_supported",
&self.floppy_toshiba_japanese_supported(),
)?;
state.serialize_field("floppy_525_360_supported", &self.floppy_525_360_supported())?;
state.serialize_field("floppy_525_12_supported", &self.floppy_525_12_supported())?;
state.serialize_field("floppy_35_720_supported", &self.floppy_35_720_supported())?;
state.serialize_field("floppy_35_288_supported", &self.floppy_35_288_supported())?;
state.serialize_field(
"print_screen_service_supported",
&self.print_screen_service_supported(),
)?;
state.serialize_field(
"keyboard_8042services_supported",
&self.keyboard_8042services_supported(),
)?;
state.serialize_field(
"serial_services_supported",
&self.serial_services_supported(),
)?;
state.serialize_field(
"printer_services_supported",
&self.printer_services_supported(),
)?;
state.serialize_field(
"cga_mono_video_services_supported",
&self.cga_mono_video_services_supported(),
)?;
state.serialize_field("nec_pc_98supported", &self.nec_pc_98supported())?;
state.end()
}
}
/// # BIOS Characteristics Extension Byte 0
#[derive(PartialEq, Eq)]
pub struct BiosCharacteristicsExtension0 {
/// Raw value
pub raw: u8,
}
impl Deref for BiosCharacteristicsExtension0 {
type Target = u8;
fn deref(&self) -> &Self::Target {
&self.raw
}
}
impl From<u8> for BiosCharacteristicsExtension0 {
fn from(raw: u8) -> Self {
BiosCharacteristicsExtension0 { raw }
}
}
impl BiosCharacteristicsExtension0 {
/// ACPI is supported.
pub fn acpi_is_supported(&self) -> bool {
self.raw & 0x01 == 0x01
}
/// USB Legacy is supported.
pub fn usb_legacy_is_supported(&self) -> bool {
self.raw & 0x02 == 0x02
}
/// AGP is supported.
pub fn agp_is_supported(&self) -> bool {
self.raw & 0x04 == 0x04
}
/// I2O boot is supported.
pub fn i2oboot_is_supported(&self) -> bool {
self.raw & 0x08 == 0x08
}
/// LS-120 SuperDisk boot is supported.
pub fn ls120super_disk_boot_is_supported(&self) -> bool {
self.raw & 0x10 == 0x10
}
/// ATAPI ZIP drive boot is supported.
pub fn atapi_zip_drive_boot_is_supported(&self) -> bool {
self.raw & 0x20 == 0x20
}
/// 1394 boot is supported.
pub fn boot_1394is_supported(&self) -> bool {
self.raw & 0x40 == 0x40
}
/// Smart battery is supported.
pub fn smart_battery_is_supported(&self) -> bool {
self.raw & 0x80 == 0x80
}
}
impl fmt::Debug for BiosCharacteristicsExtension0 {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct(std::any::type_name::<BiosCharacteristicsExtension0>())
.field("raw", &self.raw)
.field("acpi_is_supported", &self.acpi_is_supported())
.field("usb_legacy_is_supported", &self.usb_legacy_is_supported())
.field("agp_is_supported", &self.agp_is_supported())
.field("i2oboot_is_supported", &self.i2oboot_is_supported())
.field(
"ls120super_disk_boot_is_supported",
&self.ls120super_disk_boot_is_supported(),
)
.field(
"atapi_zip_drive_boot_is_supported",
&self.atapi_zip_drive_boot_is_supported(),
)
.field("boot_1394is_supported", &self.boot_1394is_supported())
.field(
"smart_battery_is_supported",
&self.smart_battery_is_supported(),
)
.finish()
}
}
impl Serialize for BiosCharacteristicsExtension0 {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut state = serializer.serialize_struct("BiosCharacteristicsExtension0", 9)?;
state.serialize_field("raw", &self.raw)?;
state.serialize_field("acpi_is_supported", &self.acpi_is_supported())?;
state.serialize_field("usb_legacy_is_supported", &self.usb_legacy_is_supported())?;
state.serialize_field("agp_is_supported", &self.agp_is_supported())?;
state.serialize_field("i2oboot_is_supported", &self.i2oboot_is_supported())?;
state.serialize_field(
"ls120super_disk_boot_is_supported",
&self.ls120super_disk_boot_is_supported(),
)?;
state.serialize_field(
"atapi_zip_drive_boot_is_supported",
&self.atapi_zip_drive_boot_is_supported(),
)?;
state.serialize_field("boot_1394is_supported", &self.boot_1394is_supported())?;
state.serialize_field(
"smart_battery_is_supported",
&self.smart_battery_is_supported(),
)?;
state.end()
}
}
/// # BIOS Characteristics Extension Byte 1
#[derive(PartialEq, Eq)]
pub struct BiosCharacteristicsExtension1 {
/// Raw value
pub raw: u8,
}
impl Deref for BiosCharacteristicsExtension1 {
type Target = u8;
fn deref(&self) -> &Self::Target {
&self.raw
}
}
impl From<u8> for BiosCharacteristicsExtension1 {
fn from(raw: u8) -> Self {
BiosCharacteristicsExtension1 { raw }
}
}
impl BiosCharacteristicsExtension1 {
/// BIOS Boot Specification is supported.
pub fn bios_boot_specification_is_supported(&self) -> bool {
self.raw & 0x01 == 0x01
}
/// Function key-initiated network service boot is supported. When function key-uninitiated
/// network service boot is not supported, a network adapter option ROM may choose to offer
/// this functionality on its own, thus offering this capability to legacy systems. When the
/// function is supported, the network adapter option ROM shall not offer this capability.
pub fn fkey_initiated_network_boot_is_supported(&self) -> bool {
self.raw & 0x02 == 0x02
}
/// Enable targeted content distribution. The manufacturer has ensured that the SMBIOS data
/// is useful in identifying the computer for targeted delivery of model-specific software and
/// firmware content through third-party content distribution services.
pub fn targeted_content_distribution_is_supported(&self) -> bool {
self.raw & 0x04 == 0x04
}
/// UEFI Specification is supported.
pub fn uefi_specification_is_supported(&self) -> bool {
self.raw & 0x08 == 0x08
}
/// SMBIOS table describes a virtual machine. (If this bit is not set, no inference can be made
/// about the virtuality of the system.)
pub fn smbios_table_describes_avirtual_machine(&self) -> bool {
self.raw & 0x10 == 0x10
}
}
impl fmt::Debug for BiosCharacteristicsExtension1 {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct(std::any::type_name::<BiosCharacteristicsExtension1>())
.field("raw", &self.raw)
.field(
"bios_boot_specification_is_supported",
&self.bios_boot_specification_is_supported(),
)
.field(
"fkey_initiated_network_boot_is_supported",
&self.fkey_initiated_network_boot_is_supported(),
)
.field(
"targeted_content_distribution_is_supported",
&self.targeted_content_distribution_is_supported(),
)
.field(
"uefi_specification_is_supported",
&self.uefi_specification_is_supported(),
)
.field(
"smbios_table_describes_avirtual_machine",
&self.smbios_table_describes_avirtual_machine(),
)
.finish()
}
}
impl Serialize for BiosCharacteristicsExtension1 {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut state = serializer.serialize_struct("BiosCharacteristicsExtension1", 6)?;
state.serialize_field("raw", &self.raw)?;
state.serialize_field(
"bios_boot_specification_is_supported",
&self.bios_boot_specification_is_supported(),
)?;
state.serialize_field(
"fkey_initiated_network_boot_is_supported",
&self.fkey_initiated_network_boot_is_supported(),
)?;
state.serialize_field(
"targeted_content_distribution_is_supported",
&self.targeted_content_distribution_is_supported(),
)?;
state.serialize_field(
"uefi_specification_is_supported",
&self.uefi_specification_is_supported(),
)?;
state.serialize_field(
"smbios_table_describes_avirtual_machine",
&self.smbios_table_describes_avirtual_machine(),
)?;
state.end()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn unit_test() {
// BIOS Information structure is sensitive to BIOS specification versions
// and prone to bugs. Therefore, it is important to test different
// structure versions.
//
// The length field specifies:
// 12h + number of BIOS Characteristics
// Extension Bytes. If no Extension Bytes are
// used the Length is 12h.
//
// For version 2.1 and 2.2 implementations, the length is 13h
// because one extension byte is defined.
//
// For version 2.3 and later implementations, the
// length is at least 14h because two extension
// bytes are defined.
//
// For version 2.4 to 3.0 implementations, the length
// is at least 18h because bytes 14-17h are defined.
//
// For version 3.1 and later implementations, the
// length is at least 1Ah because bytes 14-19h
// are defined.
// 2.4 to 3.0 BIOS Information structure. Does not include _extended_rom_size()_
// field or fields beyond.
let struct_type0 = vec![
0x00, 0x18, 0x00, 0x00, 0x01, 0x02, 0x00, 0xF0, 0x03, 0xFF, 0x80, 0x98, 0x8B, 0x3F,
0x01, 0x00, 0x11, 0x00, 0x03, 0x0D, 0x00, 0x21, 0x11, 0x2D, 0x4C, 0x45, 0x4E, 0x4F,
0x56, 0x4F, 0x00, 0x53, 0x30, 0x33, 0x4B, 0x54, 0x33, 0x33, 0x41, 0x00, 0x30, 0x38,
0x2F, 0x30, 0x36, 0x2F, 0x32, 0x30, 0x31, 0x39, 0x00, 0x00,
];
let parts = UndefinedStruct::new(&struct_type0);
let test_struct = SMBiosInformation::new(&parts);
assert_eq!(test_struct.vendor(), Some("LENOVO".to_string()));
assert_eq!(test_struct.version(), Some("S03KT33A".to_string()));
assert_eq!(test_struct.starting_address_segment(), Some(61440));
assert_eq!(test_struct.release_date(), Some("08/06/2019".to_string()));
assert_eq!(test_struct.rom_size(), Some(255));
assert_eq!(
test_struct.characteristics(),
Some(BiosCharacteristics::from(1066113152))
);
assert_eq!(test_struct.bios_vendor_reserved_characteristics(), Some(1));
assert_eq!(
test_struct.system_vendor_reserved_characteristics(),
Some(17)
);
assert_eq!(
test_struct.characteristics_extension0(),
Some(BiosCharacteristicsExtension0::from(3))
);
assert_eq!(
test_struct.characteristics_extension1(),
Some(BiosCharacteristicsExtension1::from(13))
);
assert_eq!(test_struct.system_bios_major_release(), Some(0));
assert_eq!(test_struct.system_bios_minor_release(), Some(33));
assert_eq!(test_struct.e_c_firmware_major_release(), Some(17));
assert_eq!(test_struct.e_c_firmware_minor_release(), Some(45));
// 2.4 to 3.0 BIOS Information does not include _extended_rom_size()_ or
// fields beyond.
assert!(test_struct.extended_rom_size().is_none());
// 3.1 BIOS (includes _extended_rom_size_)
let struct_type0 = vec![
0x00, 0x1A, 0x00, 0x00, 0x01, 0x02, 0x00, 0xF0, 0x03, 0xFF, 0x80, 0x98, 0x8B, 0x3F,
0x01, 0x00, 0x11, 0x00, 0x03, 0x0D, 0x00, 0x21, 0x11, 0x2D, 0x30, 0x40, 0x4C, 0x45,
0x4E, 0x4F, 0x56, 0x4F, 0x00, 0x53, 0x30, 0x33, 0x4B, 0x54, 0x33, 0x33, 0x41, 0x00,
0x30, 0x38, 0x2F, 0x30, 0x36, 0x2F, 0x32, 0x30, 0x31, 0x39, 0x00, 0x00,
];
let parts = UndefinedStruct::new(&struct_type0);
let test_struct = SMBiosInformation::new(&parts);
let extended_rom_size = test_struct.extended_rom_size().unwrap();
assert_eq!(extended_rom_size, ExtendedRomSize::from(0x4030));
match extended_rom_size {
ExtendedRomSize::Gigabytes(size) => assert_eq!(size, 48),
_ => panic!("incorrect unit"),
}
println!("{:?}", test_struct);
}
}
| 34.819215 | 100 | 0.612639 |
23514a1ec069bf8e24b99dd0902d4499bb901e77 | 3,651 | use proc_macro2::TokenStream as TokenStream2;
use syn::{Path, Type, Visibility};
use crate::widgets::{PropertyType, ReturnedWidget, Widget};
#[derive(Debug, Default)]
pub(super) struct TokenStreams {
/// The tokens for the struct fields -> name: Type,
pub struct_fields: TokenStream2,
/// The tokens initializing the widgets.
pub init_widgets: TokenStream2,
/// The tokens connecting widgets.
pub connect_widgets: TokenStream2,
/// The tokens initializing the properties.
pub init_properties: TokenStream2,
/// The tokens for the returned struct fields -> name,
pub return_fields: TokenStream2,
/// The view tokens (watch! macro)
pub view: TokenStream2,
/// The view tokens (track! macro)
pub track: TokenStream2,
/// The tokens for connecting events.
pub connect: TokenStream2,
/// The tokens for connecting events to components.
pub connect_components: TokenStream2,
}
impl Widget {
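/// Recursively collects the token streams for this widget and its
/// children, appending struct fields, widget initialization, property
/// initialization, view/track updates, and event connections to `streams`.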
pub(super) fn generate_micro_widget_tokens_recursively(
&self,
streams: &mut TokenStreams,
vis: &Option<Visibility>,
model_type: &Type,
relm4_path: &Path,
) {
self.struct_fields_stream(&mut streams.struct_fields, vis);
self.init_widgets_stream(&mut streams.init_widgets);
self.return_stream(&mut streams.return_fields);
for prop in &self.properties.properties {
prop.connect_widgets_stream(&mut streams.connect_widgets, &self.name);
if let PropertyType::Widget(widget) = &prop.ty {
widget
.generate_micro_widget_tokens_recursively(streams, vis, model_type, relm4_path);
if let Some(returned_widget) = &widget.returned_widget {
returned_widget.generate_micro_widget_tokens_recursively(
streams, vis, model_type, relm4_path,
);
}
} else {
prop.property_init_stream(&mut streams.init_properties, &self.name, relm4_path);
prop.view_stream(&mut streams.view, &self.name, relm4_path, false);
prop.track_stream(&mut streams.track, &self.name, model_type, false);
prop.connect_stream(&mut streams.connect, &self.name);
prop.connect_component_stream(&mut streams.connect_components, &self.name);
// prop.connect_parent_stream(&mut streams.parent, &self.name);
}
}
}
}
impl ReturnedWidget {
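/// Recursively collects the token streams for a widget returned by one of
/// the parent widget's methods, appending its fields, property
/// initialization, and connections to `streams`.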
pub(super) fn generate_micro_widget_tokens_recursively(
&self,
streams: &mut TokenStreams,
vis: &Option<Visibility>,
model_type: &Type,
relm4_path: &Path,
) {
self.struct_fields_stream(&mut streams.struct_fields, vis);
self.return_stream(&mut streams.return_fields);
for prop in &self.properties.properties {
prop.connect_widgets_stream(&mut streams.connect_widgets, &self.name);
if let PropertyType::Widget(widget) = &prop.ty {
widget
.generate_micro_widget_tokens_recursively(streams, vis, model_type, relm4_path);
} else {
prop.property_init_stream(&mut streams.init_properties, &self.name, relm4_path);
prop.connect_stream(&mut streams.connect, &self.name);
prop.view_stream(&mut streams.view, &self.name, relm4_path, false);
prop.track_stream(&mut streams.track, &self.name, model_type, false);
prop.connect_component_stream(&mut streams.connect_components, &self.name);
}
}
}
}
| 38.431579 | 100 | 0.63599 |
fbb8cb36f5a16869e98f4a1ec5ff8f2d0e1c9f16 | 19,686 | use super::{BuilderChain, BuilderMap, InitialMapBuilder, Position, TileType};
use crate::a_star_search;
use rltk::RandomNumberGenerator;
use std::collections::HashSet;
pub fn town_builder(
depth: i32,
width: i32,
height: i32,
_rng: &mut RandomNumberGenerator,
) -> BuilderChain {
let mut chain = BuilderChain::new(depth, width, height, "The Town of Bracketon");
chain.start_with(TownBuilder::new());
chain
}
pub struct TownBuilder {}
impl InitialMapBuilder for TownBuilder {
fn build_map(&mut self, rng: &mut rltk::RandomNumberGenerator, build_data: &mut BuilderMap) {
self.build_rooms(rng, build_data);
}
}
#[derive(Debug)]
enum BuildingTag {
Pub,
Temple,
Blacksmith,
Clothier,
Alchemist,
PlayerHouse,
Hovel,
Abandoned,
Unassigned,
}
impl TownBuilder {
pub fn new() -> Box<Self> {
Box::new(TownBuilder {})
}
pub fn build_rooms(
&mut self,
rng: &mut rltk::RandomNumberGenerator,
build_data: &mut BuilderMap,
) {
self.grass_layer(build_data);
self.water_and_piers(rng, build_data);
let (mut available_town_tiles, wall_gap_y) = self.town_walls(rng, build_data);
let mut buildings = self.buildings(rng, build_data, &mut available_town_tiles);
let doors = self.add_doors(rng, build_data, &mut buildings, wall_gap_y);
self.add_paths(build_data, &doors);
for y in wall_gap_y - 3..wall_gap_y + 4 {
let exit_idx = build_data.map.xy_idx(build_data.map.width - 1, y);
build_data.map.tiles[exit_idx] = TileType::DownStairs;
}
build_data.take_snapshot();
let building_size = self.sort_buildings(&buildings);
self.building_factory(rng, build_data, &buildings, &building_size);
self.spawn_dockers(rng, build_data);
self.spawn_townsfolk(rng, build_data, &mut available_town_tiles);
// Make visible for screenshot
for t in build_data.map.visible_tiles.iter_mut() {
*t = true;
}
build_data.take_snapshot();
}
fn grass_layer(&mut self, build_data: &mut BuilderMap) {
// We'll start with a nice layer of grass
for t in build_data.map.tiles.iter_mut() {
*t = TileType::Grass;
}
build_data.take_snapshot();
}
fn water_and_piers(&mut self, rng: &mut RandomNumberGenerator, build_data: &mut BuilderMap) {
let mut n = (rng.roll_dice(1, 65535) as f32) / 65535f32;
let mut water_width = Vec::new();
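// Trace the shoreline with a sine wave plus a little random noise,
// recording each row's water width so the piers can reach dry land.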
for y in 0..build_data.map.height {
let n_water = (f32::sin(n) * 10.0) as i32 + 14 + rng.roll_dice(1, 6);
water_width.push(n_water);
n += 0.1;
for x in 0..n_water {
let idx = build_data.map.xy_idx(x, y);
build_data.map.tiles[idx] = TileType::DeepWater;
}
for x in n_water..n_water + 3 {
let idx = build_data.map.xy_idx(x, y);
build_data.map.tiles[idx] = TileType::ShallowWater;
}
}
build_data.take_snapshot();
// Add piers
for _i in 0..rng.roll_dice(1, 4) + 6 {
let y = rng.roll_dice(1, build_data.map.height) - 1;
for x in 2 + rng.roll_dice(1, 6)..water_width[y as usize] + 4 {
let idx = build_data.map.xy_idx(x, y);
build_data.map.tiles[idx] = TileType::Bridge;
}
build_data.take_snapshot();
}
}
fn town_walls(
&mut self,
rng: &mut RandomNumberGenerator,
build_data: &mut BuilderMap,
) -> (HashSet<usize>, i32) {
let mut available_town_tiles = HashSet::new();
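// Pick the row where the east-west road crosses the map; the town walls
// leave a gap of several tiles around it.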
let wall_gap_y = rng.roll_dice(1, build_data.map.height - 9) + 5;
for y in 1..build_data.map.height - 2 {
if !(y > wall_gap_y - 4 && y < wall_gap_y + 4) {
let idx = build_data.map.xy_idx(30, y);
build_data.map.tiles[idx] = TileType::Wall;
build_data.map.tiles[idx - 1] = TileType::Floor;
let idx_right = build_data.map.xy_idx(build_data.map.width - 2, y);
build_data.map.tiles[idx_right] = TileType::Wall;
for x in 31..build_data.map.width - 2 {
let gravel_idx = build_data.map.xy_idx(x, y);
build_data.map.tiles[gravel_idx] = TileType::Gravel;
if y > 2 && y < build_data.map.height - 1 {
available_town_tiles.insert(gravel_idx);
}
}
} else {
for x in 30..build_data.map.width {
let road_idx = build_data.map.xy_idx(x, y);
build_data.map.tiles[road_idx] = TileType::Road;
}
}
}
build_data.take_snapshot();
for x in 30..build_data.map.width - 1 {
let idx_top = build_data.map.xy_idx(x, 1);
build_data.map.tiles[idx_top] = TileType::Wall;
let idx_bottom = build_data.map.xy_idx(x, build_data.map.height - 2);
build_data.map.tiles[idx_bottom] = TileType::Wall;
}
build_data.take_snapshot();
(available_town_tiles, wall_gap_y)
}
fn buildings(
&mut self,
rng: &mut RandomNumberGenerator,
build_data: &mut BuilderMap,
available_town_tiles: &mut HashSet<usize>,
) -> Vec<(i32, i32, i32, i32)> {
let mut buildings = Vec::new();
let mut n_buildings = 0;
let mut n_attempts = 0;
while n_buildings < 12 && n_attempts < 2000 {
n_attempts += 1;
let bx = rng.roll_dice(1, build_data.map.width - 32) + 30;
let by = rng.roll_dice(1, build_data.map.height) - 2;
let bw = rng.roll_dice(1, 8) + 4;
let bh = rng.roll_dice(1, 8) + 4;
let mut possible = true;
for y in by..by + bh {
for x in bx..bx + bw {
if x < 0
|| x > build_data.map.width - 1
|| y < 0
|| y > build_data.map.height - 1
{
possible = false;
} else {
let idx = build_data.map.xy_idx(x, y);
if !available_town_tiles.contains(&idx) {
possible = false;
}
}
}
}
if possible {
n_buildings += 1;
buildings.push((bx, by, bw, bh));
for y in by..by + bh {
for x in bx..bx + bw {
let idx = build_data.map.xy_idx(x, y);
build_data.map.tiles[idx] = TileType::WoodFloor;
available_town_tiles.remove(&idx);
available_town_tiles.remove(&(idx + 1));
available_town_tiles.remove(&(idx + build_data.map.width as usize));
available_town_tiles.remove(&(idx - 1));
available_town_tiles.remove(&(idx - build_data.map.width as usize));
}
}
build_data.take_snapshot();
}
}
// Outline buildings
let map_clone = build_data.map.clone();
for y in 2..map_clone.height - 2 {
for x in 31..map_clone.width - 2 {
let idx = map_clone.xy_idx(x, y);
if map_clone.tiles[idx] == TileType::WoodFloor {
let mut non_floor_neighbors = 0;
if map_clone.tiles[idx - 1] != TileType::WoodFloor {
non_floor_neighbors += 1;
}
if map_clone.tiles[idx + 1] != TileType::WoodFloor {
non_floor_neighbors += 1;
}
if map_clone.tiles[idx - map_clone.width as usize] != TileType::WoodFloor {
non_floor_neighbors += 1;
}
if map_clone.tiles[idx + map_clone.width as usize] != TileType::WoodFloor {
non_floor_neighbors += 1;
}
if non_floor_neighbors > 0 {
build_data.map.tiles[idx] = TileType::Wall;
}
}
}
}
build_data.take_snapshot();
buildings
}
fn add_doors(
&mut self,
rng: &mut RandomNumberGenerator,
build_data: &mut BuilderMap,
buildings: &mut Vec<(i32, i32, i32, i32)>,
wall_gap_y: i32,
) -> Vec<usize> {
let mut doors = Vec::new();
for (bx, by, bw, bh) in buildings.iter() {
let door_x = bx + 1 + rng.roll_dice(1, bw - 3);
let cy = by + (bh / 2);
let idx = if cy > wall_gap_y {
// Door on the north wall
build_data.map.xy_idx(door_x, *by)
} else {
build_data.map.xy_idx(door_x, by + bh - 1)
};
build_data.map.tiles[idx] = TileType::Floor;
build_data.spawn_list.push((idx, "Door".to_string()));
doors.push(idx);
}
build_data.take_snapshot();
doors
}
fn add_paths(&mut self, build_data: &mut BuilderMap, doors: &[usize]) {
let mut roads = Vec::new();
for y in 0..build_data.map.height {
for x in 0..build_data.map.width {
let idx = build_data.map.xy_idx(x, y);
if build_data.map.tiles[idx] == TileType::Road {
roads.push(idx);
}
}
}
build_data.map.populate_blocked();
for door_idx in doors.iter() {
let mut nearest_roads = Vec::new();
let door_pt = rltk::Point::new(
*door_idx as i32 % build_data.map.width as i32,
*door_idx as i32 / build_data.map.width as i32,
);
for r in roads.iter() {
nearest_roads.push((
*r,
rltk::DistanceAlg::PythagorasSquared.distance2d(
door_pt,
rltk::Point::new(
*r as i32 % build_data.map.width as i32,
*r as i32 / build_data.map.width as i32,
),
),
))
}
nearest_roads.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
let destination = rltk::Point::new(
nearest_roads[0].0 as i32 % build_data.map.width as i32,
nearest_roads[0].0 as i32 / build_data.map.width as i32,
);
if let Some((path, _cost)) = a_star_search(door_pt, destination, 0., &build_data.map) {
for step in path.iter() {
let idx = build_data.map.xy_idx(step.x, step.y);
build_data.map.tiles[idx] = TileType::Road;
roads.push(idx);
}
}
build_data.take_snapshot();
}
}
fn sort_buildings(
&mut self,
buildings: &[(i32, i32, i32, i32)],
) -> Vec<(usize, i32, BuildingTag)> {
let mut building_size: Vec<(usize, i32, BuildingTag)> = Vec::new();
for (i, (_bx, _by, bw, bh)) in buildings.iter().enumerate() {
building_size.push((i, bw * bh, BuildingTag::Unassigned));
}
building_size.sort_by(|a, b| b.1.cmp(&a.1));
for (i, b) in building_size.iter_mut().enumerate() {
b.2 = match i {
0 => BuildingTag::Pub,
1 => BuildingTag::Temple,
2 => BuildingTag::Blacksmith,
3 => BuildingTag::Clothier,
4 => BuildingTag::Alchemist,
5 => BuildingTag::PlayerHouse,
_ => BuildingTag::Hovel,
}
}
let last_index = building_size.len() - 1;
building_size[last_index].2 = BuildingTag::Abandoned;
building_size
}
fn building_factory(
&mut self,
rng: &mut rltk::RandomNumberGenerator,
build_data: &mut BuilderMap,
buildings: &[(i32, i32, i32, i32)],
building_index: &[(usize, i32, BuildingTag)],
) {
for (i, _size, build_type) in building_index.iter() {
let building = &buildings[*i];
match build_type {
BuildingTag::Pub => self.build_pub(building, build_data, rng),
BuildingTag::Temple => self.build_temple(building, build_data, rng),
BuildingTag::Blacksmith => self.build_smith(building, build_data, rng),
BuildingTag::Clothier => self.build_clothier(building, build_data, rng),
BuildingTag::Alchemist => self.build_alchemist(building, build_data, rng),
BuildingTag::PlayerHouse => self.build_my_house(building, build_data, rng),
BuildingTag::Hovel => self.build_hovel(building, build_data, rng),
BuildingTag::Abandoned => self.build_abandoned_house(building, build_data, rng),
_ => {}
}
}
}
fn random_building_spawn(
&mut self,
building: &(i32, i32, i32, i32),
build_data: &mut BuilderMap,
rng: &mut RandomNumberGenerator,
to_place: &mut Vec<&str>,
player_idx: usize,
) {
to_place.reverse(); // So it is easy to pop() in order
let (bx, by, bw, bh) = *building;
for y in by..by + bh {
for x in bx..bx + bw {
let idx = build_data.map.xy_idx(x, y);
if build_data.map.tiles[idx] == TileType::WoodFloor
&& idx != player_idx
&& rng.roll_dice(1, 3) == 1
{
if let Some(entity_tag) = to_place.pop() {
build_data.spawn_list.push((idx, entity_tag.to_string()));
}
};
}
}
}
fn build_pub(
&mut self,
building: &(i32, i32, i32, i32),
build_data: &mut BuilderMap,
rng: &mut RandomNumberGenerator,
) {
// Place the Player
let (pub_x, pub_y, pub_w, pub_h) = *building;
let player_x = pub_x + pub_w / 2;
let player_y = pub_y + pub_h / 2;
let player_idx = build_data.map.xy_idx(player_x, player_y);
build_data.starting_position = Some(Position {
x: player_x,
y: player_y,
});
// Place other items
let mut to_place = vec![
"Barkeep",
"Shady Salesman",
"Patron",
"Patron",
"Keg",
"Table",
"Chair",
"Table",
"Chair",
];
self.random_building_spawn(building, build_data, rng, &mut to_place, player_idx);
}
fn build_temple(
&mut self,
building: &(i32, i32, i32, i32),
build_data: &mut BuilderMap,
rng: &mut RandomNumberGenerator,
) {
// Place items
let mut to_place = vec![
"Priest",
"Parishioner",
"Parishioner",
"Chair",
"Chair",
"Candle",
"Candle",
];
self.random_building_spawn(building, build_data, rng, &mut to_place, usize::MAX);
}
fn build_smith(
&mut self,
building: &(i32, i32, i32, i32),
build_data: &mut BuilderMap,
rng: &mut rltk::RandomNumberGenerator,
) {
// Place items
let mut to_place = vec![
"Blacksmith",
"Anvil",
"Water Trough",
"Weapon Rack",
"Armor Stand",
];
self.random_building_spawn(building, build_data, rng, &mut to_place, usize::MAX);
}
fn build_clothier(
&mut self,
building: &(i32, i32, i32, i32),
build_data: &mut BuilderMap,
rng: &mut rltk::RandomNumberGenerator,
) {
// Place items
let mut to_place = vec!["Clothier", "Cabinet", "Table", "Loom", "Hide Rack"];
self.random_building_spawn(building, build_data, rng, &mut to_place, usize::MAX);
}
fn build_alchemist(
&mut self,
building: &(i32, i32, i32, i32),
build_data: &mut BuilderMap,
rng: &mut rltk::RandomNumberGenerator,
) {
// Place items
let mut to_place = vec!["Alchemist", "Chemistry Set", "Dead Thing", "Chair", "Table"];
self.random_building_spawn(building, build_data, rng, &mut to_place, usize::MAX);
}
fn build_my_house(
&mut self,
building: &(i32, i32, i32, i32),
build_data: &mut BuilderMap,
rng: &mut rltk::RandomNumberGenerator,
) {
// Place items
let mut to_place = vec!["Mom", "Bed", "Cabinet", "Chair", "Table"];
self.random_building_spawn(building, build_data, rng, &mut to_place, usize::MAX);
}
fn build_hovel(
&mut self,
building: &(i32, i32, i32, i32),
build_data: &mut BuilderMap,
rng: &mut rltk::RandomNumberGenerator,
) {
// Place items
let mut to_place = vec!["Peasant", "Bed", "Chair", "Table"];
self.random_building_spawn(building, build_data, rng, &mut to_place, usize::MAX);
}
fn build_abandoned_house(
&mut self,
building: &(i32, i32, i32, i32),
build_data: &mut BuilderMap,
rng: &mut rltk::RandomNumberGenerator,
) {
let (bx, by, bw, bh) = *building;
for y in by..by + bh {
for x in bx..bx + bw {
let idx = build_data.map.xy_idx(x, y);
if build_data.map.tiles[idx] == TileType::WoodFloor
&& idx != 0
&& rng.roll_dice(1, 2) == 1
{
build_data.spawn_list.push((idx, "Rat".to_string()));
}
}
}
}
fn spawn_dockers(&mut self, rng: &mut RandomNumberGenerator, build_data: &mut BuilderMap) {
for (idx, tt) in build_data.map.tiles.iter().enumerate() {
if *tt == TileType::Bridge && rng.roll_dice(1, 6) == 1 {
let roll = rng.roll_dice(1, 3);
match roll {
1 => build_data.spawn_list.push((idx, "Dock Worker".to_string())),
2 => build_data
.spawn_list
.push((idx, "Wannabe Pirate".to_string())),
_ => build_data.spawn_list.push((idx, "Fisher".to_string())),
}
}
}
}
fn spawn_townsfolk(
&mut self,
rng: &mut RandomNumberGenerator,
build_data: &mut BuilderMap,
available_town_tiles: &mut HashSet<usize>,
) {
for idx in available_town_tiles.iter() {
if rng.roll_dice(1, 10) == 1 {
let roll = rng.roll_dice(1, 4);
match roll {
1 => build_data.spawn_list.push((*idx, "Peasant".to_string())),
2 => build_data.spawn_list.push((*idx, "Drunk".to_string())),
3 => build_data
.spawn_list
.push((*idx, "Dock Worker".to_string())),
_ => build_data.spawn_list.push((*idx, "Fisher".to_string())),
}
}
}
}
}
| 35.989031 | 99 | 0.507315 |
eb07ac4c9b73af0314d7cbdd22d67ddc5041576a | 2,764 | // Built-in Lints
#![deny(warnings, missing_debug_implementations, missing_copy_implementations)]
// Clippy lints
#![allow(
clippy::option_map_unwrap_or_else,
clippy::option_map_unwrap_or,
clippy::match_same_arms,
clippy::type_complexity,
clippy::needless_doctest_main
)]
#![warn(
clippy::option_unwrap_used,
clippy::print_stdout,
clippy::wrong_pub_self_convention,
clippy::mut_mut,
clippy::non_ascii_literal,
clippy::similar_names,
clippy::unicode_not_nfc,
clippy::enum_glob_use,
clippy::if_not_else,
clippy::items_after_statements,
clippy::used_underscore_binding
)]
#![cfg_attr(test, allow(clippy::option_unwrap_used, clippy::result_unwrap_used))]
extern crate proc_macro;
mod embed_migrations;
mod migrations;
use proc_macro::TokenStream;
/// This macro will read your migrations at compile time, and embed a module you can use to execute
/// them at runtime without the migration files being present on the file system. This is useful if
/// you would like to use Diesel's migration infrastructure, but want to ship a single executable
/// file (such as for embedded applications). It can also be used to apply migrations to an in
/// memory database (Diesel does this for its own test suite).
///
/// You can optionally pass the path to the migrations directory to this macro. When left
/// unspecified, Diesel Codegen will search for the migrations directory in the same way that
/// Diesel CLI does. If specified, the path should be relative to the directory where `Cargo.toml`
/// resides.
///
/// # Examples
///
/// ```rust
/// # use diesel_migrations::embed_migrations;
/// # include!("../../../diesel/src/doctest_setup.rs");
/// # table! {
/// # users {
/// # id -> Integer,
/// # name -> VarChar,
/// # }
/// # }
/// #
/// # #[cfg(feature = "postgres")]
/// # embed_migrations!("../../migrations/postgresql");
/// # #[cfg(all(feature = "mysql", not(feature = "postgres")))]
/// # embed_migrations!("../../migrations/mysql");
/// # #[cfg(all(feature = "sqlite", not(any(feature = "postgres", feature = "mysql"))))]
/// embed_migrations!("../../migrations/sqlite");
///
/// fn main() {
/// let connection = establish_connection();
///
/// // This will run the necessary migrations.
/// embedded_migrations::run(&connection);
///
/// // By default the output is thrown out. If you want to redirect it to stdout, you
/// // should call embedded_migrations::run_with_output.
/// embedded_migrations::run_with_output(&connection, &mut std::io::stdout());
/// }
/// ```
#[proc_macro]
pub fn embed_migrations(input: TokenStream) -> TokenStream {
embed_migrations::expand(input.to_string())
.to_string()
.parse()
.unwrap()
}
| 34.55 | 99 | 0.680897 |
21f9e8f29e5da009f7311796591c3d0df3f545fa | 1,697 | pub trait Messenger {
fn send(&self, msg: &str);
}
pub struct LimitTracker<'a, T: Messenger> {
messenger: &'a T,
value: usize,
max: usize,
}
impl<'a, T> LimitTracker<'a, T>
where T: Messenger {
pub fn new(messenger: &T, max: usize) -> LimitTracker<T> {
LimitTracker {
messenger,
value: 0,
max,
}
}
pub fn set_value(&mut self, value: usize) {
self.value = value;
let percentage_of_max = self.value as f64 / self.max as f64;
if percentage_of_max >= 1.0 {
self.messenger.send("Error: You are over your quota!");
} else if percentage_of_max >= 0.9 {
self.messenger.send("Urgent warning: You've used up over 90% of your quota!");
} else if percentage_of_max >= 0.75 {
self.messenger.send("Warning: You've used up over 75% of your quota!");
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::cell::RefCell;
struct MockMessenger {
sent_messages: RefCell<Vec<String>>,
}
impl MockMessenger {
fn new() -> MockMessenger {
MockMessenger { sent_messages: RefCell::new(vec![]) }
}
}
impl Messenger for MockMessenger {
fn send(&self, message: &str) {
self.sent_messages.borrow_mut().push(String::from(message));
}
}
#[test]
fn it_sends_an_over_75_percent_warning_message() {
let mock_messenger = MockMessenger::new();
let mut limit_tracker = LimitTracker::new(&mock_messenger, 100);
limit_tracker.set_value(80);
assert_eq!(mock_messenger.sent_messages.borrow().len(), 1);
}
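    // A second check in the same style (a minimal sketch): crossing 100% of
    // the quota should send the error message as well, for two messages total.
    #[test]
    fn it_sends_an_error_message_when_over_quota() {
        let mock_messenger = MockMessenger::new();
        let mut limit_tracker = LimitTracker::new(&mock_messenger, 100);
        limit_tracker.set_value(80);
        limit_tracker.set_value(101);
        assert_eq!(mock_messenger.sent_messages.borrow().len(), 2);
    }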
}
| 25.328358 | 90 | 0.573954 |
f84d7a3e128bb8c2927f014ae48cf137b44466e7 | 438 | //! Module for reading and writing Mobile-Originated (MO) SBD messages.
//!
//! Though messages technically come in two flavors, mobile originated and mobile terminated, we
//! only handle mobile originated messages in this library.
mod header;
mod information_element;
mod message;
mod session_status;
pub use self::{
header::Header, information_element::InformationElement, message::Message,
session_status::SessionStatus,
};
| 29.2 | 96 | 0.771689 |
145b5c324a00b5350d3e7d766ec2c48fc4845242 | 16,767 | // Copyright 2018-2020, Wayfair GmbH
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// use crate::{NodeId, KV};
use super::Reply as WsReply;
use super::*;
use crate::version::VERSION;
use crate::{pubsub, NodeId};
use async_std::net::TcpListener;
use async_std::net::ToSocketAddrs;
use async_std::task;
use futures::channel::mpsc::{channel, Receiver, Sender};
use futures::io::{AsyncRead, AsyncWrite};
use futures::{select, FutureExt, StreamExt};
use std::io::Error;
use tungstenite::protocol::Message;
use ws_proto::*;
/// A websocket connection is long-running, so it is easier
/// to handle with an actor.
pub(crate) struct Connection {
node: Node,
remote_id: NodeId,
protocol: Option<Protocol>,
rx: Receiver<Message>,
tx: Sender<Message>,
ws_rx: Receiver<WsMessage>,
ws_tx: Sender<WsMessage>,
ps_rx: Receiver<SubscriberMsg>,
ps_tx: Sender<SubscriberMsg>,
}
impl Connection {
pub(crate) fn new(node: Node, rx: Receiver<Message>, tx: Sender<Message>) -> Self {
let (ps_tx, ps_rx) = channel(crate::CHANNEL_SIZE);
let (ws_tx, ws_rx) = channel(crate::CHANNEL_SIZE);
Self {
node,
remote_id: NodeId(0),
protocol: None,
rx,
tx,
ps_tx,
ps_rx,
ws_tx,
ws_rx,
}
}
async fn handle_initial(&mut self, msg: Message) -> bool {
self.handle_control(msg, false).await
}
async fn handle_control(&mut self, msg: Message, bail_on_fail: bool) -> bool {
if msg.is_text() {
let text = msg.into_data();
match serde_json::from_slice(&text) {
Ok(ProtocolSelect::Status { rid }) => self
.node
.tx
.send(UrMsg::Status(rid, self.ws_tx.clone()))
.await
.is_ok(),
Ok(ProtocolSelect::Version { .. }) => self
.tx
.send(Message::Text(serde_json::to_string(VERSION).unwrap()))
.await
.is_ok(),
Ok(ProtocolSelect::Select { rid, protocol }) => {
self.protocol = Some(protocol);
self.tx
.send(Message::Text(
serde_json::to_string(&ProtocolSelect::Selected { rid, protocol })
.unwrap(),
))
.await
.is_ok()
}
Ok(ProtocolSelect::Selected { .. }) => false,
Ok(ProtocolSelect::As { protocol, cmd }) => match protocol {
Protocol::KV => self.handle_kv_msg(serde_json::from_value(cmd).unwrap()),
_ => false,
},
Ok(ProtocolSelect::Subscribe { channel }) => self
.node
.pubsub
.send(pubsub::Msg::Subscribe {
channel,
tx: self.ps_tx.clone(),
})
.await
.is_ok(),
Err(e) => {
if !bail_on_fail {
false
} else {
error!(
self.node.logger,
"Failed to decode ProtocolSelect message: {} => {}",
e,
String::from_utf8(text).unwrap_or_default()
);
true
}
}
}
} else {
true
}
}
async fn handle_uring(&mut self, msg: Message) -> bool {
if msg.is_text() {
let text = msg.into_data();
match serde_json::from_slice(&text) {
Ok(CtrlMsg::Hello(id, peer)) => {
info!(self.node.logger, "Hello from {}", id);
self.remote_id = id;
self.node
.tx
.unbounded_send(UrMsg::RegisterRemote(id, peer, self.ws_tx.clone()))
.is_ok()
}
Ok(CtrlMsg::AckProposal(pid, success)) => self
.node
.tx
.unbounded_send(UrMsg::AckProposal(pid, success))
.is_ok(),
Ok(CtrlMsg::ForwardProposal(from, pid, sid, eid, value)) => self
.node
.tx
.unbounded_send(UrMsg::ForwardProposal(from, pid, sid, eid, value))
.is_ok(),
Ok(_) => true,
Err(e) => {
error!(
self.node.logger,
"Failed to decode CtrlMsg message: {} => {}",
e,
String::from_utf8(text).unwrap_or_default()
);
false
}
}
} else if msg.is_binary() {
let bin = msg.into_data();
let msg = decode_ws(&bin);
self.node.tx.unbounded_send(UrMsg::RaftMsg(msg)).is_ok()
} else {
true
}
}
async fn handle_kv(&mut self, msg: Message) -> bool {
if msg.is_text() {
let text = msg.into_data();
match serde_json::from_slice(&text) {
Ok(msg) => self.handle_kv_msg(msg),
Err(e) => {
error!(
self.node.logger,
"Failed to decode KVRequest message: {} => {}",
e,
String::from_utf8(text).unwrap_or_default()
);
false
}
}
} else {
true
}
}
fn handle_kv_msg(&mut self, msg: KVRequest) -> bool {
match msg {
KVRequest::Get { rid, key } => self
.node
.tx
.unbounded_send(UrMsg::Get(
key.into_bytes(),
WsReply(rid, self.ws_tx.clone()),
))
.is_ok(),
KVRequest::Put { rid, key, store } => self
.node
.tx
.unbounded_send(UrMsg::Put(
key.into_bytes(),
store.into_bytes(),
WsReply(rid, self.ws_tx.clone()),
))
.is_ok(),
KVRequest::Delete { rid, key } => self
.node
.tx
.unbounded_send(UrMsg::Delete(
key.into_bytes(),
WsReply(rid, self.ws_tx.clone()),
))
.is_ok(),
KVRequest::Cas {
rid,
key,
check,
store,
} => self
.node
.tx
.unbounded_send(UrMsg::Cas(
key.into_bytes(),
check.map(String::into_bytes),
store.into_bytes(),
WsReply(rid, self.ws_tx.clone()),
))
.is_ok(),
}
}
async fn handle_mring(&mut self, msg: Message) -> bool {
if msg.is_text() {
let text = msg.into_data();
match serde_json::from_slice(&text) {
Ok(msg) => self.handle_mring_msg(msg).await,
Err(e) => {
error!(
self.node.logger,
"Failed to decode MRRequest message: {} => {}",
e,
String::from_utf8(text).unwrap_or_default()
);
false
}
}
} else {
true
}
}
async fn handle_mring_msg(&mut self, msg: MRRequest) -> bool {
match msg {
MRRequest::GetSize { rid } => self
.node
.tx
.unbounded_send(UrMsg::MRingGetSize(WsReply(rid, self.ws_tx.clone())))
.is_ok(),
MRRequest::SetSize { rid, size } => self
.node
.tx
.unbounded_send(UrMsg::MRingSetSize(size, WsReply(rid, self.ws_tx.clone())))
.is_ok(),
MRRequest::GetNodes { rid } => self
.node
.tx
.unbounded_send(UrMsg::MRingGetNodes(WsReply(rid, self.ws_tx.clone())))
.is_ok(),
MRRequest::AddNode { rid, node } => self
.node
.tx
.unbounded_send(UrMsg::MRingAddNode(node, WsReply(rid, self.ws_tx.clone())))
.is_ok(),
MRRequest::RemoveNode { rid, node } => self
.node
.tx
.unbounded_send(UrMsg::MRingRemoveNode(
node,
WsReply(rid, self.ws_tx.clone()),
))
.is_ok(),
}
}
async fn handle_version(&mut self, msg: Message) -> bool {
if msg.is_text() {
let text = msg.into_data();
match serde_json::from_slice(&text) {
Ok(msg) => self.handle_version_msg(msg),
Err(e) => {
error!(
self.node.logger,
"Failed to decode VRequest message: {} => {}",
e,
String::from_utf8(text).unwrap_or_default()
);
false
}
}
} else {
true
}
}
fn handle_version_msg(&mut self, msg: VRequest) -> bool {
match msg {
VRequest::Get { rid } => {
self.node
.tx
.unbounded_send(UrMsg::Version(rid, self.ws_tx.clone()))
.unwrap();
true
}
}
}
async fn handle_status(&mut self, msg: Message) -> bool {
if msg.is_text() {
let text = msg.into_data();
match serde_json::from_slice(&text) {
Ok(msg) => self.handle_status_msg(msg),
Err(e) => {
error!(
self.node.logger,
"Failed to decode SRequest message: {} => {}",
e,
String::from_utf8(text).unwrap_or_default()
);
false
}
}
} else {
true
}
}
fn handle_status_msg(&mut self, msg: SRequest) -> bool {
match msg {
SRequest::Get { rid } => {
self.node
.tx
.unbounded_send(UrMsg::Status(rid, self.ws_tx.clone()))
.unwrap();
true
}
}
}
pub async fn msg_loop(mut self, logger: Logger) {
loop {
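            // Multiplex three inputs: raw frames from the websocket (`rx`),
            // replies from the node (`ws_rx`), and pub/sub notifications
            // (`ps_rx`); any handler returning `false` tears the loop down.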
let cont = select! {
msg = self.rx.next() => {
if let Some(msg) = msg {
let msg2 = msg.clone();
let handled_ok = match self.protocol {
None => self.handle_initial(msg).await,
Some(Protocol::KV) => self.handle_kv(msg).await,
Some(Protocol::URing) => self.handle_uring(msg).await,
Some(Protocol::MRing) => self.handle_mring(msg).await,
Some(Protocol::Version) => self.handle_version(msg).await,
Some(Protocol::Status) => self.handle_status(msg).await,
};
handled_ok || self.handle_control(msg2, true).await
} else {
false
}
}
            msg = self.ws_rx.next() => {
                match self.protocol {
                    None
                    | Some(Protocol::Status)
                    | Some(Protocol::Version)
                    | Some(Protocol::KV)
                    | Some(Protocol::MRing) => match msg {
                        Some(WsMessage::Ctrl(msg)) => self.tx.send(Message::Text(serde_json::to_string(&msg).unwrap())).await.is_ok(),
                        Some(WsMessage::Reply(_, msg)) => self.tx.send(Message::Text(serde_json::to_string(&msg).unwrap())).await.is_ok(),
                        None | Some(WsMessage::Raft(_)) => false,
                    },
                    Some(Protocol::URing) => match msg {
                        Some(WsMessage::Ctrl(msg)) => self.tx.send(Message::Text(serde_json::to_string(&msg).unwrap())).await.is_ok(),
                        Some(WsMessage::Raft(msg)) => self.tx.send(Message::Binary(encode_ws(msg).to_vec())).await.is_ok(),
                        Some(WsMessage::Reply(_, msg)) => self.tx.send(Message::Text(serde_json::to_string(&msg).unwrap())).await.is_ok(),
                        None => false,
                    },
                }
            }
msg = self.ps_rx.next() => {
if let Some(msg) = msg {
self.tx.send(Message::Text(serde_json::to_string(&msg).unwrap())).await.is_ok()
} else {
false
}
}
complete => false
};
if !cont {
error!(logger, "Client connection to {} down.", self.remote_id);
self.node
.tx
.unbounded_send(UrMsg::DownRemote(self.remote_id))
.unwrap();
break;
}
}
}
}
pub(crate) async fn accept_connection<S>(logger: Logger, node: Node, stream: S)
where
S: AsyncRead + AsyncWrite + Unpin,
{
let mut ws_stream = if let Ok(ws_stream) = async_tungstenite::accept_async(stream).await {
ws_stream
} else {
error!(logger, "Error during the websocket handshake occurred");
return;
};
// Create a channel for our stream, which other sockets will use to
// send us messages. Then register our address with the stream to send
// data to us.
let (mut msg_tx, msg_rx) = channel(crate::CHANNEL_SIZE);
let (response_tx, mut response_rx) = channel(crate::CHANNEL_SIZE);
let c = Connection::new(node, msg_rx, response_tx);
task::spawn(c.msg_loop(logger.clone()));
loop {
select! {
message = ws_stream.next().fuse() => {
if let Some(Ok(message)) = message {
msg_tx
.send(message).await
.expect("Failed to forward request");
} else {
error!(logger, "Client connection down.", );
break;
}
}
resp = response_rx.next() => {
if let Some(resp) = resp {
ws_stream.send(resp).await.expect("Failed to send response");
} else {
error!(logger, "Client connection down.", );
break;
}
}
complete => {
error!(logger, "Client connection down.", );
break;
}
}
}
}
pub(crate) async fn run(logger: Logger, node: Node, addr: String) -> Result<(), Error> {
let addr = addr
.to_socket_addrs()
.await
.expect("Not a valid address")
.next()
.expect("Not a socket address");
// Create the event loop and TCP listener we'll accept connections on.
let try_socket = TcpListener::bind(&addr).await;
let listener = try_socket.expect("Failed to bind");
info!(logger, "Listening on: {}", addr);
while let Ok((stream, _)) = listener.accept().await {
task::spawn(accept_connection(logger.clone(), node.clone(), stream));
}
Ok(())
}
| 35.523305 | 141 | 0.437586 |
2f8f23991314e4113d38fc437449d1e35cbb90e4 | 13,596 | //! impl warmup_1 {}
use std::cmp::max;
//==========//
// Warmup 1 //
//==========//
/// Given a string, return a new string where the
/// last 3 chars are now in upper case. If the
/// string has less than 3 chars, uppercase whatever
/// is there.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// end_up("Hello"); → "HeLLO"
/// end_up("hi there"); → "hi thERE"
/// end_up("hi"); → "HI"
/// ```
pub fn end_up(string: &str) -> String {
    // Uppercase the last 3 chars (or the whole string if it is shorter).
    let char_count = string.chars().count();
    let split = char_count.saturating_sub(3);
    let front: String = string.chars().take(split).collect();
    let back: String = string.chars().skip(split).collect::<String>().to_uppercase();
    return front + &back;
}
/// Given two int values, return their sum. Unless
/// the two values are the same, then return double
/// their sum.
///
/// # Examples
///
/// Basic Usage:
///
/// ```
/// sumDouble(1, 2); → 3
/// sumDouble(3, 2); → 5
/// sumDouble(2, 2); → 8
/// ```
pub fn sum_double(a: i32, b: i32) -> i32 {
return if a == b { (a + b) * 2 } else { a + b };
}
/// Return true if the given string contains between
/// 1 and 3 'e' chars.
///
/// # Examples
///
/// Basic Usage:
///
/// ```
/// stringE("Hello") → true
/// stringE("Heelle") → true
/// stringE("Heelele") → false
/// ```
pub fn string_e(e: &str) -> bool {
    let count = e.chars().filter(|&c| c == 'e').count();
    return count >= 1 && count <= 3;
}
/// Given a string, take the first 2
/// chars and return the string with the
/// 2 chars added at both the front and
/// back, so "kitten" yields"kikittenki". If
/// the string length is less than 2, use
/// whatever chars are there.
///
/// # Examples
///
/// Basic Usage:
///
/// ```
/// front22("kitten") → "kikittenki"
/// front22("Ha") → "HaHaHa"
/// front22("abc") → "ababcab"
/// ```
pub fn front_22(string: &str) -> String {
let mut new_string: String = "".to_string();
new_string.push_str(&string.chars().take(2).collect::<String>());
new_string.push_str(&string);
new_string.push_str(&string.chars().take(2).collect::<String>());
return new_string;
}
/// The parameter weekday is true if it is a
/// weekday, and the parameter vacation is true
/// if we are on vacation. We sleep in if it is
/// not a weekday or we're on vacation. Return
/// true if we sleep in.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// sleep_in(false, false) → true
/// sleep_in(true, false) → false
/// sleep_in(false, true) → true
/// ```
pub fn sleep_in(weekday: bool, vacation: bool) -> bool {
return !weekday || vacation;
}
/// Given 2 int values, return true if one is
/// negative and one is positive. Except if the
/// parameter "negative" is true, then return
/// true only if both are negative
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// posNeg(1, -1, false); → true
/// posNeg(-1, 1, false); → true
/// posNeg(-4, -5, true); → true
/// ```
pub fn pos_neg(a: i32, b: i32, negative: bool) -> bool {
if negative {
return a < 0 && b < 0;
}
return (a < 0 && b > 0) || (a > 0 && b < 0);
}
/// We have a loud talking parrot. The "hour"
/// parameter is the current hour time in the
/// range 0..23. We are in trouble if the parrot
/// is talking and the hour is before 7 or after
/// 20. Return true if we are in trouble.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// parrotTrouble(true, 6) → true
/// parrotTrouble(true, 7) → false
/// parrotTrouble(false, 6) → false
/// ```
pub fn parrot_trouble(talking: bool, hour: i32) -> bool {
return (hour < 7 || hour > 20) && talking;
}
/// Given a string, return a new string where "not"
/// has been added to the front. However, if the
/// string already begins with "not", return the string
/// unchanged.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// notString("candy"); → "not candy"
/// ```
pub fn not_string(string: &str) -> String {
    if string.starts_with("not") {
        return string.to_string();
    }
    return format!("not {}", string);
}
/// Given an int n, return true if it is within 10 of 100 or 200.
/// Note: Math.abs(num) computes the absolute value of a number.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// nearHundred(93); → true
/// nearHundred(90); → true
/// nearHundred(89); → false
/// ```
pub fn near_hundred(n: i32) -> bool {
    // "Within 10" is inclusive, so 90 and 110 both count.
    return (100 - n).abs() <= 10 || (200 - n).abs() <= 10;
}
/// We have two monkeys, a and b, and the parameters
/// aSmile and bSmile indicate if each is smiling. We
/// are in trouble if they are both smiling or if neither
/// of them is smiling. Return true if we are in trouble.
///
/// TL;DR This is an inverted XOR gate, sort of
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// monkeyTrouble(true, true) → true
/// monkeyTrouble(false, false) → true
/// monkeyTrouble(true, false) → false
/// ```
pub fn monkey_trouble(a_smile: bool, b_smile: bool) -> bool {
return !((a_smile && !b_smile) || (!a_smile && b_smile));
}
/// Given a non-empty string and an int n, return a new
/// string where the char at index n has been removed.
/// The value of n will be a valid index of a char in
/// the original string (i.e. n will be in the range
/// 0..str.length()-1 inclusive).
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// missing_char("kitten", 1) → "ktten"
/// missing_char("kitten", 0) → "itten"
/// missing_char("kitten", 4) → "kittn
/// ```
pub fn missing_char(string: &str, n: usize) -> String {
let mut new_string: String = String::from(string);
new_string.remove(n);
return new_string;
}
/// Given 2 positive int values, return the larger
/// value that is in the range 10..20 inclusive, or
/// return 0 if neither is in that range.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// max1020(11, 19); → 19
/// max1020(19, 11); → 19
/// max1020(11, 9); → 11
/// ```
pub fn max_1020(a: i32, b: i32) -> i32 {
    let in_range = |n: i32| (10..=20).contains(&n);
    return match (in_range(a), in_range(b)) {
        (true, true) => max(a, b),
        (true, false) => a,
        (false, true) => b,
        (false, false) => 0,
    };
}
/// Given 2 ints, a and b, return true if one if
/// them is 10 or if their sum is 10.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// makes_10(9, 10) → true
/// makes_10(9, 9) → false
/// makes_10(1, 9) → true
/// ```
pub fn makes_10(a: i32, b: i32) -> bool {
return (a == 10) || (b == 10) || (a + b == 10);
}
/// Given two non-negative int values, return true
/// if they have the same last digit, such as
/// with 27 and 57. Note that the % "mod" operator
/// computes remainders, so 17 % 10 is 7.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// lastDigit(7, 17) → true
/// lastDigit(6, 17) → false
/// lastDigit(3, 113) → true
/// ```
pub fn last_digit(a: i32, b: i32) -> bool {
    return a % 10 == b % 10;
}
/// Given an int n, return the absolute difference
/// between n and 21, except return double the absolute
/// difference if n is over 21.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// diff21(19) → 2
/// diff21(10) → 11
/// diff21(21) → 0
/// ```
pub fn diff_21(n: i32) -> i32 {
    let diff = (n - 21).abs();
    return if n > 21 { diff * 2 } else { diff };
}
/// Given a string, return a new string where the
/// first and last chars have been exchanged.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// frontBack("code") → "eodc"
/// frontBack("a") → "a"
/// frontBack("ab") → "ba"
/// ```
pub fn front_back(string: &str) -> String {
    // Swap the first and last chars; strings shorter than 2 are unchanged.
    let mut chars: Vec<char> = string.chars().collect();
    if chars.len() > 1 {
        let last = chars.len() - 1;
        chars.swap(0, last);
    }
    return chars.into_iter().collect();
}
/// Given a string, we'll say that the front is the first
/// 3 chars of the string. If the string length is less
/// than 3, the front is whatever is there. Return a
/// new string which is 3 copies of the front.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// front3("Java") → "JavJavJav"
/// front3("Chocolate") → "ChoChoCho"
/// front3("abc") → "abcabcabc"
/// ```
pub fn front_3(string: &str) -> String {
let mut new_string: String = "".to_string();
let boi = &string.chars().take(3).collect::<String>();
for _i in 0..3 {
new_string.push_str(boi);
}
return new_string;
}
/// Given a string, take the last char and return a new
/// string with the last char added at the front and
/// back, so "cat" yields "tcatt". The original string
/// will be length 1 or more.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// backAround("cat") → "tcatt"
/// backAround("Hello") → "oHelloo"
/// backAround("a") → "aaa"
/// ```
pub fn back_around(string: &str) -> String {
    return match string.chars().last() {
        Some(last) => format!("{}{}{}", last, string, last),
        None => string.to_string(),
    };
}
/// Given 2 int values, return whichever value is nearest
/// to the value 10, or return 0 in the event of a tie.
/// Note that Math.abs(n) returns the absolute value of
/// a number.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// close10(8, 13) → 8
/// close10(13, 8) → 8
/// close10(13, 7) → 0
/// ```
pub fn close_10(a: i32, b: i32) -> i32 {
    let da = (10 - a).abs();
    let db = (10 - b).abs();
    if da == db {
        return 0;
    }
    return if da < db { a } else { b };
}
/// Given a string, if the string "del" appears starting
/// at index 1, return a string where that "del" has been
/// deleted. Otherwise, return the string unchanged.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// delDel("adelbc") → "abc"
/// delDel("adelHello") → "aHello"
/// delDel("adedbc") → "adedbc"
/// ```
pub fn del_del(string: &str) -> String {
    // Byte-indexed slicing is fine here; these exercises use ASCII input.
    if string.len() >= 4 && &string[1..4] == "del" {
        return format!("{}{}", &string[..1], &string[4..]);
    }
    return string.to_string();
}
/// Given a non-empty string and an int N, return
/// the string made starting with char 0, and then
/// every Nth char of the string. So if N is 3, use
/// char 0, 3, 6, ... and so on. N is 1 or more.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// every_nth("Miracle", 2) → "Mrce"
/// every_nth("abcdefg", 2) → "aceg"
/// every_nth("abcdefg", 3) → "adg"
/// ```
pub fn every_nth(s: &str, n: usize) -> String {
    // `step_by` keeps char 0 and then every nth char after it (n is 1 or more).
    return s.chars().step_by(n).collect();
}
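// Spot checks for the implementations above, taken directly from the doc
// examples (a minimal sketch; run with `cargo test`):
#[cfg(test)]
mod warmup_tests {
    use super::*;
    #[test]
    fn doc_examples_hold() {
        assert_eq!(end_up("Hello"), "HeLLO");
        assert_eq!(front_back("code"), "eodc");
        assert_eq!(diff_21(19), 2);
        assert!(last_digit(3, 113));
        assert_eq!(max_1020(11, 9), 11);
        assert_eq!(every_nth("abcdefg", 3), "adg");
    }
}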
/// We'll say that a number is "teen" if it is in the
/// range 13..19 inclusive. Given 3 int values, return
/// true if 1 or more of them are teen.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// hasTeen(13, 20, 10) → true
/// hasTeen(20, 19, 10) → true
/// hasTeen(20, 10, 13) → true
/// ```
pub fn has_teen(a: i32, b: i32, c: i32) -> bool {
    let is_teen = |n: i32| (13..=19).contains(&n);
    return is_teen(a) || is_teen(b) || is_teen(c);
}
/// Given two temperatures, return true if one is less
/// than 0 and the other is greater than 100.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// icyHot(120, -1) → true
/// icyHot(-1, 120) → true
/// icyHot(2, 120) → false
/// ```
pub fn icy_hot(a: i32, b: i32) -> bool {
    return (a < 0 && b > 100) || (b < 0 && a > 100);
}
/// Given 2 int values, return true if either of them
/// is in the range 10..20 inclusive.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// in1020(12, 99) → true
/// in1020(21, 12) → true
/// in1020(8, 99) → false
/// ```
pub fn in_1020(a: i32, b: i32) -> bool {
    return (10..=20).contains(&a) || (10..=20).contains(&b);
}
/// Given 2 int values, return true if they are both
/// in the range 30..40 inclusive, or they are both
/// in the range 40..50 inclusive.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// in3050(30, 31) → true
/// in3050(30, 41) → false
/// in3050(40, 50) → true
/// ```
pub fn in_3050(a: i32, b: i32) -> bool {
    let both_in = |lo: i32, hi: i32| (lo..=hi).contains(&a) && (lo..=hi).contains(&b);
    return both_in(30, 40) || both_in(40, 50);
}
/// Given three int values, a b c, return the largest.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// intMax(1, 2, 3) → 3
/// intMax(1, 3, 2) → 3
/// intMax(3, 2, 1) → 3
/// ```
pub fn int_max(a: i32, b: i32, c: i32) -> i32 {
    return max(a, max(b, c));
}
/// Return true if the given non-negative number is a
/// multiple of 3 or a multiple of 5. Use the % "mod"
/// operator -- see
/// [Introduction to Mod](https://codingbat.com/doc/practice/mod-introduction.html)
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// or35(3) → true
/// or35(10) → true
/// or35(8) → false
/// ```
pub fn or_35(n: i32) -> bool {
    return n % 3 == 0 || n % 5 == 0;
}
/// Given a string, return a string made of the first 2
/// chars (if present), however include first char only
/// if it is 'o' and include the second only if it is 'z',
/// so "ozymandias" yields "oz".
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// startOz("ozymandias") → "oz"
/// startOz("bzoo") → "z"
/// startOz("oxx") → "o"
/// ```
pub fn start_oz(string: &str) -> String {
    let mut result = String::new();
    if let Some(first) = string.chars().next() {
        if first == 'o' {
            result.push(first);
        }
    }
    if let Some(second) = string.chars().nth(1) {
        if second == 'z' {
            result.push(second);
        }
    }
    return result;
}
/// Return true if the given string begins with "mix",
/// except the 'm' can be anything, so "pix", "9ix" ..
/// all count.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// mixStart("mix snacks") → true
/// mixStart("pix snacks") → true
/// mixStart("piz snacks") → false
/// ```
pub fn mix_start(string: &str) -> bool {
    // The first char may be anything; chars 1 and 2 must be "ix".
    return string.chars().skip(1).take(2).collect::<String>() == "ix";
}
/// Given a string, return true if the string starts with
/// "hi" and false otherwise.
///
/// # Example
///
/// Basic Usage:
///
/// ```
/// startHi("hi there") → true
/// startHi("hi") → true
/// startHi("hello hi") → false
/// ```
pub fn start_hi(string: &str) -> bool {
    return string.starts_with("hi");
} | 22.69783 | 83 | 0.560385 |
fcede3eae976f61c56ab623c455c8af4f14672fc | 24,768 | // ----------------------------------------------------------------------
// Checking loans
//
// Phase 2 of check: we walk down the tree and check that:
// 1. assignments are always made to mutable locations;
// 2. loans made in overlapping scopes do not conflict
// 3. assignments do not affect things loaned out as immutable
// 4. moves do not affect things loaned out in any way
use crate::borrowck::*;
use crate::borrowck::InteriorKind::{InteriorElement, InteriorField};
use rustc::middle::expr_use_visitor as euv;
use rustc::middle::expr_use_visitor::MutateMode;
use rustc::middle::mem_categorization as mc;
use rustc::middle::mem_categorization::Categorization;
use rustc::middle::region;
use rustc::ty::{self, TyCtxt, RegionKind};
use syntax_pos::Span;
use rustc::hir;
use rustc::hir::Node;
use log::debug;
use std::rc::Rc;
// FIXME (#16118): These functions are intended to allow the borrow checker to
// be less precise in its handling of Box while still allowing moves out of a
// Box. They should be removed when Unique is removed from LoanPath.
fn owned_ptr_base_path<'a, 'tcx>(loan_path: &'a LoanPath<'tcx>) -> &'a LoanPath<'tcx> {
//! Returns the base of the leftmost dereference of an Unique in
//! `loan_path`. If there is no dereference of an Unique in `loan_path`,
//! then it just returns `loan_path` itself.
return match helper(loan_path) {
Some(new_loan_path) => new_loan_path,
None => loan_path,
};
fn helper<'a, 'tcx>(loan_path: &'a LoanPath<'tcx>) -> Option<&'a LoanPath<'tcx>> {
match loan_path.kind {
LpVar(_) | LpUpvar(_) => None,
LpExtend(ref lp_base, _, LpDeref(mc::Unique)) => {
match helper(&lp_base) {
v @ Some(_) => v,
None => Some(&lp_base)
}
}
LpDowncast(ref lp_base, _) |
LpExtend(ref lp_base, ..) => helper(&lp_base)
}
}
}
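// For example (an informal illustration of the definition above): for the loan
// path of `(*p).x` where `p` is a `Box`, the leftmost `Unique` dereference is
// the one through `p`, so `owned_ptr_base_path` returns the path for `p` itself.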
fn owned_ptr_base_path_rc<'tcx>(loan_path: &Rc<LoanPath<'tcx>>) -> Rc<LoanPath<'tcx>> {
//! The equivalent of `owned_ptr_base_path` for an &Rc<LoanPath> rather than
//! a &LoanPath.
return match helper(loan_path) {
Some(new_loan_path) => new_loan_path,
None => loan_path.clone()
};
fn helper<'tcx>(loan_path: &Rc<LoanPath<'tcx>>) -> Option<Rc<LoanPath<'tcx>>> {
match loan_path.kind {
LpVar(_) | LpUpvar(_) => None,
LpExtend(ref lp_base, _, LpDeref(mc::Unique)) => {
match helper(lp_base) {
v @ Some(_) => v,
None => Some(lp_base.clone())
}
}
LpDowncast(ref lp_base, _) |
LpExtend(ref lp_base, ..) => helper(lp_base)
}
}
}
struct CheckLoanCtxt<'a, 'tcx> {
bccx: &'a BorrowckCtxt<'a, 'tcx>,
dfcx_loans: &'a LoanDataFlow<'tcx>,
move_data: &'a move_data::FlowedMoveData<'tcx>,
all_loans: &'a [Loan<'tcx>],
movable_generator: bool,
}
impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> {
fn consume(&mut self,
consume_id: hir::HirId,
_: Span,
cmt: &mc::cmt_<'tcx>,
mode: euv::ConsumeMode) {
debug!("consume(consume_id={}, cmt={:?})", consume_id, cmt);
self.consume_common(consume_id.local_id, cmt, mode);
}
fn matched_pat(&mut self,
_matched_pat: &hir::Pat,
_cmt: &mc::cmt_<'_>,
_mode: euv::MatchMode) { }
fn consume_pat(&mut self,
consume_pat: &hir::Pat,
cmt: &mc::cmt_<'tcx>,
mode: euv::ConsumeMode) {
debug!("consume_pat(consume_pat={:?}, cmt={:?})", consume_pat, cmt);
self.consume_common(consume_pat.hir_id.local_id, cmt, mode);
}
fn borrow(&mut self,
borrow_id: hir::HirId,
borrow_span: Span,
cmt: &mc::cmt_<'tcx>,
loan_region: ty::Region<'tcx>,
bk: ty::BorrowKind,
loan_cause: euv::LoanCause)
{
debug!("borrow(borrow_id={}, cmt={:?}, loan_region={:?}, \
bk={:?}, loan_cause={:?})",
borrow_id, cmt, loan_region,
bk, loan_cause);
if let Some(lp) = opt_loan_path(cmt) {
self.check_if_path_is_moved(borrow_id.local_id, &lp);
}
self.check_for_conflicting_loans(borrow_id.local_id);
self.check_for_loans_across_yields(cmt, loan_region, borrow_span);
}
fn mutate(&mut self,
assignment_id: hir::HirId,
_: Span,
assignee_cmt: &mc::cmt_<'tcx>,
mode: euv::MutateMode)
{
debug!("mutate(assignment_id={}, assignee_cmt={:?})",
assignment_id, assignee_cmt);
if let Some(lp) = opt_loan_path(assignee_cmt) {
match mode {
MutateMode::Init | MutateMode::JustWrite => {
// In a case like `path = 1`, then path does not
// have to be *FULLY* initialized, but we still
// must be careful lest it contains derefs of
// pointers.
self.check_if_assigned_path_is_moved(assignee_cmt.hir_id.local_id, &lp);
}
MutateMode::WriteAndRead => {
// In a case like `path += 1`, then path must be
// fully initialized, since we will read it before
// we write it.
self.check_if_path_is_moved(assignee_cmt.hir_id.local_id,
&lp);
}
}
}
self.check_assignment(assignment_id.local_id, assignee_cmt);
}
fn decl_without_init(&mut self, _id: hir::HirId, _span: Span) { }
}
pub fn check_loans<'a, 'tcx>(
bccx: &BorrowckCtxt<'a, 'tcx>,
dfcx_loans: &LoanDataFlow<'tcx>,
move_data: &move_data::FlowedMoveData<'tcx>,
all_loans: &[Loan<'tcx>],
body: &hir::Body,
) {
debug!("check_loans(body id={})", body.value.hir_id);
let def_id = bccx.tcx.hir().body_owner_def_id(body.id());
let hir_id = bccx.tcx.hir().as_local_hir_id(def_id).unwrap();
let movable_generator = !match bccx.tcx.hir().get(hir_id) {
Node::Expr(&hir::Expr {
node: hir::ExprKind::Closure(.., Some(hir::GeneratorMovability::Static)),
..
}) => true,
_ => false,
};
let param_env = bccx.tcx.param_env(def_id);
let mut clcx = CheckLoanCtxt {
bccx,
dfcx_loans,
move_data,
all_loans,
movable_generator,
};
let rvalue_promotable_map = bccx.tcx.rvalue_promotable_map(def_id);
euv::ExprUseVisitor::new(&mut clcx,
bccx.tcx,
def_id,
param_env,
&bccx.region_scope_tree,
bccx.tables,
Some(rvalue_promotable_map))
.consume_body(body);
}
fn compatible_borrow_kinds(borrow_kind1: ty::BorrowKind,
borrow_kind2: ty::BorrowKind)
-> bool {
borrow_kind1 == ty::ImmBorrow && borrow_kind2 == ty::ImmBorrow
}
impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> {
pub fn tcx(&self) -> TyCtxt<'tcx> { self.bccx.tcx }
pub fn each_issued_loan<F>(&self, node: hir::ItemLocalId, mut op: F) -> bool where
F: FnMut(&Loan<'tcx>) -> bool,
{
//! Iterates over each loan that has been issued
//! on entrance to `node`, regardless of whether it is
//! actually *in scope* at that point. Sometimes loans
//! are issued for future scopes and thus they may have been
//! *issued* but not yet be in effect.
self.dfcx_loans.each_bit_on_entry(node, |loan_index| {
let loan = &self.all_loans[loan_index];
op(loan)
})
}
pub fn each_in_scope_loan<F>(&self, scope: region::Scope, mut op: F) -> bool where
F: FnMut(&Loan<'tcx>) -> bool,
{
//! Like `each_issued_loan()`, but only considers loans that are
//! currently in scope.
self.each_issued_loan(scope.item_local_id(), |loan| {
if self.bccx.region_scope_tree.is_subscope_of(scope, loan.kill_scope) {
op(loan)
} else {
true
}
})
}
fn each_in_scope_loan_affecting_path<F>(&self,
scope: region::Scope,
loan_path: &LoanPath<'tcx>,
mut op: F)
-> bool where
F: FnMut(&Loan<'tcx>) -> bool,
{
//! Iterates through all of the in-scope loans affecting `loan_path`,
//! calling `op`, and ceasing iteration if `false` is returned.
// First, we check for a loan restricting the path P being used. This
// accounts for borrows of P but also borrows of subpaths, like P.a.b.
// Consider the following example:
//
// let x = &mut a.b.c; // Restricts a, a.b, and a.b.c
// let y = a; // Conflicts with restriction
let loan_path = owned_ptr_base_path(loan_path);
let cont = self.each_in_scope_loan(scope, |loan| {
let mut ret = true;
for restr_path in &loan.restricted_paths {
if **restr_path == *loan_path {
if !op(loan) {
ret = false;
break;
}
}
}
ret
});
if !cont {
return false;
}
// Next, we must check for *loans* (not restrictions) on the path P or
// any base path. This rejects examples like the following:
//
// let x = &mut a.b;
// let y = a.b.c;
//
// Limiting this search to *loans* and not *restrictions* means that
// examples like the following continue to work:
//
// let x = &mut a.b;
// let y = a.c;
let mut loan_path = loan_path;
loop {
match loan_path.kind {
LpVar(_) | LpUpvar(_) => {
break;
}
LpDowncast(ref lp_base, _) |
LpExtend(ref lp_base, ..) => {
loan_path = &lp_base;
}
}
let cont = self.each_in_scope_loan(scope, |loan| {
if *loan.loan_path == *loan_path {
op(loan)
} else {
true
}
});
if !cont {
return false;
}
}
return true;
}
pub fn loans_generated_by(&self, node: hir::ItemLocalId) -> Vec<usize> {
//! Returns a vector of the loans that are generated as
//! we enter `node`.
let mut result = Vec::new();
self.dfcx_loans.each_gen_bit(node, |loan_index| {
result.push(loan_index);
true
});
return result;
}
pub fn check_for_loans_across_yields(&self,
cmt: &mc::cmt_<'tcx>,
loan_region: ty::Region<'tcx>,
borrow_span: Span) {
pub fn borrow_of_local_data(cmt: &mc::cmt_<'_>) -> bool {
match cmt.cat {
// Borrows of static items is allowed
Categorization::StaticItem => false,
// Reborrow of already borrowed data is ignored
// Any errors will be caught on the initial borrow
Categorization::Deref(..) => false,
// By-ref upvars has Derefs so they will get ignored.
// Generators counts as FnOnce so this leaves only
// by-move upvars, which is local data for generators
Categorization::Upvar(..) => true,
Categorization::ThreadLocal(region) |
Categorization::Rvalue(region) => {
// Rvalues promoted to 'static are no longer local
if let RegionKind::ReStatic = *region {
false
} else {
true
}
}
// Borrow of local data must be checked
Categorization::Local(..) => true,
// For interior references and downcasts, find out if the base is local
Categorization::Downcast(ref cmt_base, _) |
Categorization::Interior(ref cmt_base, _) => borrow_of_local_data(&cmt_base),
}
}
if !self.movable_generator {
return;
}
if !borrow_of_local_data(cmt) {
return;
}
let scope = match *loan_region {
// A concrete region in which we will look for a yield expression
RegionKind::ReScope(scope) => scope,
// There cannot be yields inside an empty region
RegionKind::ReEmpty => return,
// Local data cannot have these lifetimes
RegionKind::ReEarlyBound(..) |
RegionKind::ReLateBound(..) |
RegionKind::ReFree(..) |
RegionKind::ReStatic => {
self.bccx
.tcx
.sess.delay_span_bug(borrow_span,
&format!("unexpected region for local data {:?}",
loan_region));
return
}
// These cannot exist in borrowck
RegionKind::ReVar(..) |
RegionKind::RePlaceholder(..) |
RegionKind::ReClosureBound(..) |
RegionKind::ReErased => span_bug!(borrow_span,
"unexpected region in borrowck {:?}",
loan_region),
};
let body_id = self.bccx.body.value.hir_id.local_id;
if self.bccx.region_scope_tree.containing_body(scope) != Some(body_id) {
// We are borrowing local data longer than its storage.
// This should result in other borrowck errors.
self.bccx.tcx.sess.delay_span_bug(borrow_span,
"borrowing local data longer than its storage");
return;
}
if let Some(_) = self.bccx.region_scope_tree
.yield_in_scope_for_expr(scope, cmt.hir_id, self.bccx.body)
{
self.bccx.signal_error();
}
}
pub fn check_for_conflicting_loans(&self, node: hir::ItemLocalId) {
//! Checks to see whether any of the loans that are issued
//! on entrance to `node` conflict with loans that have already been
//! issued when we enter `node` (for example, we do not
//! permit two `&mut` borrows of the same variable).
//!
//! (Note that some loans can be *issued* without necessarily
//! taking effect yet.)
debug!("check_for_conflicting_loans(node={:?})", node);
let new_loan_indices = self.loans_generated_by(node);
debug!("new_loan_indices = {:?}", new_loan_indices);
for &new_loan_index in &new_loan_indices {
self.each_issued_loan(node, |issued_loan| {
let new_loan = &self.all_loans[new_loan_index];
// Only report an error for the first issued loan that conflicts
// to avoid O(n^2) errors.
self.report_error_if_loans_conflict(issued_loan, new_loan)
});
}
for (i, &x) in new_loan_indices.iter().enumerate() {
let old_loan = &self.all_loans[x];
for &y in &new_loan_indices[(i+1) ..] {
let new_loan = &self.all_loans[y];
self.report_error_if_loans_conflict(old_loan, new_loan);
}
}
}
pub fn report_error_if_loans_conflict(
&self,
old_loan: &Loan<'tcx>,
new_loan: &Loan<'tcx>,
) -> bool {
//! Checks whether `old_loan` and `new_loan` can safely be issued
//! simultaneously.
debug!("report_error_if_loans_conflict(old_loan={:?}, new_loan={:?})",
old_loan,
new_loan);
// Should only be called for loans that are in scope at the same time.
assert!(self.bccx.region_scope_tree.scopes_intersect(old_loan.kill_scope,
new_loan.kill_scope));
self.report_error_if_loan_conflicts_with_restriction(
old_loan, new_loan)
&& self.report_error_if_loan_conflicts_with_restriction(
new_loan, old_loan)
}
pub fn report_error_if_loan_conflicts_with_restriction(
&self,
loan1: &Loan<'tcx>,
loan2: &Loan<'tcx>,
) -> bool {
//! Checks whether the restrictions introduced by `loan1` would
//! prohibit `loan2`.
debug!("report_error_if_loan_conflicts_with_restriction(\
loan1={:?}, loan2={:?})",
loan1,
loan2);
if compatible_borrow_kinds(loan1.kind, loan2.kind) {
return true;
}
let loan2_base_path = owned_ptr_base_path_rc(&loan2.loan_path);
for restr_path in &loan1.restricted_paths {
if *restr_path != loan2_base_path { continue; }
self.bccx.signal_error();
return false;
}
true
}
fn consume_common(
&self,
id: hir::ItemLocalId,
cmt: &mc::cmt_<'tcx>,
mode: euv::ConsumeMode,
) {
if let Some(lp) = opt_loan_path(cmt) {
match mode {
euv::Copy => {
self.check_for_copy_of_frozen_path(id, &lp);
}
euv::Move(_) => {
// Sometimes moves aren't from a move path;
// this either means that the original move
// was from something illegal to move,
// or was moved from referent of an unsafe
// pointer or something like that.
if self.move_data.is_move_path(id, &lp) {
self.check_for_move_of_borrowed_path(id, &lp);
}
}
}
self.check_if_path_is_moved(id, &lp);
}
}
fn check_for_copy_of_frozen_path(&self,
id: hir::ItemLocalId,
copy_path: &LoanPath<'tcx>) {
self.analyze_restrictions_on_use(id, copy_path, ty::ImmBorrow);
}
fn check_for_move_of_borrowed_path(&self,
id: hir::ItemLocalId,
move_path: &LoanPath<'tcx>) {
// We want to detect if there are any loans at all, so we search for
        // any loans incompatible with MutBorrow, since all other kinds of
// loans are incompatible with that.
self.analyze_restrictions_on_use(id, move_path, ty::MutBorrow);
}
fn analyze_restrictions_on_use(&self,
expr_id: hir::ItemLocalId,
use_path: &LoanPath<'tcx>,
borrow_kind: ty::BorrowKind) {
debug!("analyze_restrictions_on_use(expr_id={:?}, use_path={:?})",
expr_id, use_path);
let scope = region::Scope {
id: expr_id,
data: region::ScopeData::Node
};
self.each_in_scope_loan_affecting_path(
scope, use_path, |loan| {
if !compatible_borrow_kinds(loan.kind, borrow_kind) {
self.bccx.signal_error();
false
} else {
true
}
});
}
/// Reports an error if `expr` (which should be a path)
/// is using a moved/uninitialized value
fn check_if_path_is_moved(&self,
id: hir::ItemLocalId,
lp: &Rc<LoanPath<'tcx>>) {
debug!("check_if_path_is_moved(id={:?}, lp={:?})", id, lp);
// FIXME: if you find yourself tempted to cut and paste
// the body below and then specializing the error reporting,
// consider refactoring this instead!
let base_lp = owned_ptr_base_path_rc(lp);
self.move_data.each_move_of(id, &base_lp, |_, _| {
self.bccx.signal_error();
false
});
}
/// Reports an error if assigning to `lp` will use a
/// moved/uninitialized value. Mainly this is concerned with
/// detecting derefs of uninitialized pointers.
///
/// For example:
///
/// ```
/// let a: i32;
/// a = 10; // ok, even though a is uninitialized
/// ```
///
/// ```
/// struct Point { x: u32, y: u32 }
/// let mut p: Point;
/// p.x = 22; // ok, even though `p` is uninitialized
/// ```
///
/// ```compile_fail,E0381
/// # struct Point { x: u32, y: u32 }
/// let mut p: Box<Point>;
/// (*p).x = 22; // not ok, p is uninitialized, can't deref
/// ```
fn check_if_assigned_path_is_moved(&self,
id: hir::ItemLocalId,
lp: &Rc<LoanPath<'tcx>>)
{
match lp.kind {
LpVar(_) | LpUpvar(_) => {
// assigning to `x` does not require that `x` is initialized
}
LpDowncast(ref lp_base, _) => {
// assigning to `(P->Variant).f` is ok if assigning to `P` is ok
self.check_if_assigned_path_is_moved(id, lp_base);
}
LpExtend(ref lp_base, _, LpInterior(_, InteriorField(_))) => {
match lp_base.to_type().kind {
ty::Adt(def, _) if def.has_dtor(self.tcx()) => {
// In the case where the owner implements drop, then
// the path must be initialized to prevent a case of
// partial reinitialization
//
// FIXME: could refactor via hypothetical
// generalized check_if_path_is_moved
let loan_path = owned_ptr_base_path_rc(lp_base);
self.move_data.each_move_of(id, &loan_path, |_, _| {
self.bccx
.signal_error();
false
});
return;
},
_ => {},
}
// assigning to `P.f` is ok if assigning to `P` is ok
self.check_if_assigned_path_is_moved(id, lp_base);
}
LpExtend(ref lp_base, _, LpInterior(_, InteriorElement)) |
LpExtend(ref lp_base, _, LpDeref(_)) => {
// assigning to `P[i]` requires `P` is initialized
// assigning to `(*P)` requires `P` is initialized
self.check_if_path_is_moved(id, lp_base);
}
}
}
fn check_assignment(&self,
assignment_id: hir::ItemLocalId,
assignee_cmt: &mc::cmt_<'tcx>) {
debug!("check_assignment(assignee_cmt={:?})", assignee_cmt);
// Check that we don't invalidate any outstanding loans
if let Some(loan_path) = opt_loan_path(assignee_cmt) {
let scope = region::Scope {
id: assignment_id,
data: region::ScopeData::Node
};
self.each_in_scope_loan_affecting_path(scope, &loan_path, |_| {
self.bccx.signal_error();
false
});
}
// Check for reassignments to (immutable) local variables. This
// needs to be done here instead of in check_loans because we
// depend on move data.
if let Categorization::Local(_) = assignee_cmt.cat {
let lp = opt_loan_path(assignee_cmt).unwrap();
self.move_data.each_assignment_of(assignment_id, &lp, |_| {
if !assignee_cmt.mutbl.is_mutable() {
self.bccx.signal_error();
}
false
});
return
}
}
}
| 36.370044 | 94 | 0.510942 |
dea8156d4ac03d53e87598d0ca8589fa470d089a | 1,557 | // Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Higher level wallet functions which can be used by callers to operate
//! on the wallet, as well as helpers to invoke and instantiate wallets
//! and listeners
#![deny(non_upper_case_globals)]
#![deny(non_camel_case_types)]
#![deny(non_snake_case)]
#![deny(unused_mut)]
#![warn(missing_docs)]
use grin_wallet_util::grin_core as core;
use grin_wallet_util::grin_keychain as keychain;
use grin_wallet_util::grin_util as util;
extern crate grin_wallet_impls as impls;
extern crate grin_wallet_libwallet as libwallet;
extern crate failure_derive;
extern crate serde_json;
#[macro_use]
extern crate log;
mod foreign;
mod foreign_rpc;
mod owner;
mod owner_rpc;
pub use crate::foreign::Foreign;
pub use crate::foreign_rpc::ForeignRpc;
pub use crate::owner::Owner;
pub use crate::owner_rpc::OwnerRpc;
pub use crate::foreign_rpc::foreign_rpc as foreign_rpc_client;
pub use crate::foreign_rpc::run_doctest_foreign;
pub use crate::owner_rpc::run_doctest_owner;
| 31.77551 | 75 | 0.775209 |
ab9a99a2ba8c4cc4f19da026f115bdc6bbf62e86 | 3,144 | use serde::{Deserialize, Serialize};
use crate::{
requests::{dynamic, json, Method},
types::True,
};
/// Use this method to send answers to callback queries sent from inline keyboards. The answer will be displayed to the user as a notification at the top of the chat screen or as an alert. On success, True is returned. Alternatively, the user can be redirected to the specified Game URL. For this option to work, you must first create a game for your bot via @Botfather and accept the terms. Otherwise, you may use links like t.me/your_bot?start=XXXX that open your bot with a parameter.
#[serde_with_macros::skip_serializing_none]
#[derive(Debug, PartialEq, Eq, Hash, Clone, Deserialize, Serialize)]
pub struct AnswerCallbackQuery {
/// Unique identifier for the query to be answered
callback_query_id: String,
/// Text of the notification. If not specified, nothing will be shown to the user, 0-200 characters
text: Option<String>,
/// If true, an alert will be shown by the client instead of a notification at the top of the chat screen. Defaults to false.
show_alert: Option<bool>,
/// URL that will be opened by the user's client. If you have created a Game and accepted the conditions via @Botfather, specify the URL that opens your game – note that this will only work if the query comes from a callback_game button. Otherwise, you may use links like t.me/your_bot?start=XXXX that open your bot with a parameter.
url: Option<String>,
/// The maximum amount of time in seconds that the result of the callback query may be cached client-side. Telegram apps will support caching starting in version 3.14. Defaults to 0.
cache_time: Option<i32>,
}
impl Method for AnswerCallbackQuery {
type Output = True;
const NAME: &'static str = "answerCallbackQuery";
}
impl json::Payload for AnswerCallbackQuery {}
impl dynamic::Payload for AnswerCallbackQuery {
fn kind(&self) -> dynamic::Kind {
dynamic::Kind::Json(serde_json::to_string(self).unwrap())
}
}
impl AnswerCallbackQuery {
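/// Constructs the payload for answering the given callback query.
///
/// A minimal sketch (optional fields are set afterwards through the
/// `json::Request` builder methods defined below):
///
/// ```ignore
/// let payload = AnswerCallbackQuery::new("unique-query-id");
/// ```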
pub fn new<C>(callback_query_id: C) -> Self
where
C: Into<String>
{
let callback_query_id = callback_query_id.into();
Self {
callback_query_id,
text: None,
show_alert: None,
url: None,
cache_time: None,
}
}
}
impl json::Request<'_, AnswerCallbackQuery> {
pub fn callback_query_id<T>(mut self, val: T) -> Self
where
T: Into<String>
{
self.payload.callback_query_id = val.into();
self
}
pub fn text<T>(mut self, val: T) -> Self
where
T: Into<String>
{
self.payload.text = Some(val.into());
self
}
pub fn show_alert(mut self, val: bool) -> Self {
self.payload.show_alert = Some(val);
self
}
pub fn url<T>(mut self, val: T) -> Self
where
T: Into<String>
{
self.payload.url = Some(val.into());
self
}
pub fn cache_time(mut self, val: i32) -> Self {
self.payload.cache_time = Some(val);
self
}
}
| 35.325843 | 486 | 0.660305 |
abe52536b334c80db8cd652c8384cca74d7109e3 | 2,025 | use crate::core::group_by_query_key;
use crate::errors::RequestError;
use crate::schemas::{Error, Pets};
use actix_web::{HttpRequest, HttpResponse};
use serde::Deserialize;
#[derive(Debug, Deserialize)]
pub struct Query {
pub limit: Option<i32>,
}
impl Query {
pub async fn from_raw(raw: &HttpRequest) -> Result<Self, RequestError> {
let kvs = group_by_query_key(raw.query_string())?;
let value_of_limit = kvs
.get("limit")
.map(|values| {
values[0]
.parse::<i32>()
.map_err(|e| RequestError::InvalidQueryValue {
key: "limit".to_string(),
message: e.to_string(),
})
})
.transpose()?;
Ok(Query {
limit: value_of_limit,
})
}
}
#[derive(Debug)]
pub struct Request {
pub query: Query,
pub raw: HttpRequest,
}
impl Request {
pub async fn from_raw(raw: HttpRequest) -> Result<Self, RequestError> {
let query = Query::from_raw(&raw).await?;
Ok(Request { query, raw })
}
}
pub trait Responder {
fn to_raw(self) -> HttpResponse;
}
#[derive(Debug)]
pub struct ResponseHeaders {
pub x_next: Option<String>,
}
#[derive(Debug)]
pub enum Response {
OK {
headers: ResponseHeaders,
content: Pets,
},
InternalServerError {
content: Error,
},
}
impl Responder for Response {
fn to_raw(self) -> HttpResponse {
match self {
Response::OK { headers, content } => {
let mut response = HttpResponse::Ok();
if let Some(value) = headers.x_next {
response.set_header("x-next", value);
}
response.json(content)
}
Response::InternalServerError { content } => {
let mut response = HttpResponse::InternalServerError();
response.json(content)
}
}
}
}
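// A minimal handler sketch (added for illustration): wires `Request::from_raw`
// and `Responder::to_raw` together. Fetching the actual `Pets` payload is left
// as a `todo!()` since the storage layer lives outside this module.
#[allow(dead_code, unreachable_code)]
async fn list_pets_sketch(raw: HttpRequest) -> HttpResponse {
    match Request::from_raw(raw).await {
        Ok(request) => {
            // `request.query.limit` carries the parsed `limit` query parameter.
            let _limit = request.query.limit;
            let content: Pets = todo!("fetch up to `_limit` pets");
            Response::OK {
                headers: ResponseHeaders { x_next: None },
                content,
            }
            .to_raw()
        }
        Err(_) => HttpResponse::BadRequest().finish(),
    }
}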
| 24.107143 | 76 | 0.535802 |
4affebc2a9c1a507df2aa5cb29275a80a108c6db | 32,297 | // Copyright 2020-2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::{
base_expr::BaseExpr, eq::AstEq, ArrayPattern, ArrayPredicatePattern, BinExpr, Bytes,
ClauseGroup, Comprehension, EventPath, ExprPath, GroupBy, GroupByInt, Helper, ImutExpr,
ImutExprInt, Invoke, InvokeAggr, List, Literal, LocalPath, Match, Merge, MetadataPath,
NodeMetas, Patch, PatchOperation, Path, Pattern, PredicateClause, PredicatePattern, Record,
RecordPattern, Recur, ReservedPath, Segment, StatePath, StrLitElement, StringLit, UnaryExpr,
Value,
};
use crate::errors::{error_event_ref_not_allowed, Result};
/// Return value from visit methods for `ImutExprIntVisitor`
/// controlling whether to continue walking the subtree or not
pub enum VisitRes {
/// carry on walking
Walk,
/// stop walking
Stop,
}
use VisitRes::Walk;
/// Visitor for traversing all `ImutExprInt`s within the given `ImutExprInt`
///
/// Implement your custom expr visiting logic by overriding the `visit_*` methods.
/// You do not need to traverse further down; this is done by the provided `walk_*` methods,
/// which implement walking the expression tree and do not need to be changed.
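///
/// # Example
///
/// A minimal sketch of a custom visitor that counts literals (compare
/// `Find42Visitor` in the test module below):
///
/// ```ignore
/// #[derive(Default)]
/// struct LiteralCounter {
///     count: usize,
/// }
///
/// impl<'script> ImutExprIntVisitor<'script> for LiteralCounter {
///     fn visit_literal(&mut self, _literal: &mut Literal<'script>) -> Result<VisitRes> {
///         self.count += 1;
///         Ok(VisitRes::Walk)
///     }
/// }
/// ```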
pub trait ImutExprIntVisitor<'script> {
/// visit a record
///
/// # Errors
/// if the walker function fails
fn visit_record(&mut self, _record: &mut Record<'script>) -> Result<VisitRes> {
Ok(Walk)
}
/// walk a record
///
/// # Errors
/// if the walker function fails
fn walk_record(&mut self, record: &mut Record<'script>) -> Result<()> {
for field in &mut record.fields {
self.walk_string(&mut field.name)?;
self.walk_expr(&mut field.value)?;
}
Ok(())
}
/// visit a list
///
/// # Errors
/// if the walker function fails
fn visit_list(&mut self, _list: &mut List<'script>) -> Result<VisitRes> {
Ok(Walk)
}
/// walk a list
///
/// # Errors
/// if the walker function fails
fn walk_list(&mut self, list: &mut List<'script>) -> Result<()> {
for element in &mut list.exprs {
self.walk_expr(&mut element.0)?;
}
Ok(())
}
/// visit a binary
///
/// # Errors
/// if the walker function fails
fn visit_binary(&mut self, _binary: &mut BinExpr<'script>) -> Result<VisitRes> {
Ok(Walk)
}
/// walk a binary
///
/// # Errors
/// if the walker function fails
fn walk_binary(&mut self, binary: &mut BinExpr<'script>) -> Result<()> {
self.walk_expr(&mut binary.lhs)?;
self.walk_expr(&mut binary.rhs)
}
/// visit a unary expr
///
/// # Errors
/// if the walker function fails
fn visit_unary(&mut self, _unary: &mut UnaryExpr<'script>) -> Result<VisitRes> {
Ok(Walk)
}
/// walk a unary
///
/// # Errors
/// if the walker function fails
fn walk_unary(&mut self, unary: &mut UnaryExpr<'script>) -> Result<()> {
self.walk_expr(&mut unary.expr)
}
/// visit a patch expr
///
/// # Errors
/// if the walker function fails
fn visit_patch(&mut self, _patch: &mut Patch<'script>) -> Result<VisitRes> {
Ok(Walk)
}
/// visit a match expr
///
/// # Errors
/// if the walker function fails
fn visit_match(&mut self, _mmatch: &mut Match<'script, ImutExprInt>) -> Result<VisitRes> {
Ok(Walk)
}
/// walk a patch expr
///
/// # Errors
/// if the walker function fails
fn walk_patch(&mut self, patch: &mut Patch<'script>) -> Result<()> {
self.walk_expr(&mut patch.target)?;
for op in &mut patch.operations {
match op {
PatchOperation::Insert { ident, expr }
| PatchOperation::Default { ident, expr }
| PatchOperation::Merge { ident, expr }
| PatchOperation::Update { ident, expr }
| PatchOperation::Upsert { ident, expr } => {
self.walk_string(ident)?;
self.walk_expr(expr)?;
}
PatchOperation::Copy { from, to } | PatchOperation::Move { from, to } => {
self.walk_string(from)?;
self.walk_string(to)?;
}
PatchOperation::Erase { ident } => {
self.walk_string(ident)?;
}
PatchOperation::DefaultRecord { expr } | PatchOperation::MergeRecord { expr } => {
self.walk_expr(expr)?;
}
}
}
Ok(())
}
/// Walks a precondition
///
/// # Errors
/// if the walker function fails
fn walk_precondition(
&mut self,
precondition: &mut super::ClausePreCondition<'script>,
) -> Result<()> {
for segment in precondition.path.segments_mut() {
self.walk_segment(segment)?;
}
Ok(())
}
/// walk a match expr
///
/// # Errors
/// if the walker function fails
fn walk_match(&mut self, mmatch: &mut Match<'script, ImutExprInt<'script>>) -> Result<()> {
self.walk_expr(&mut mmatch.target)?;
for group in &mut mmatch.patterns {
self.walk_clause_group(group)?;
}
Ok(())
}
/// Walks a predicate clause
///
/// # Errors
/// if the walker function fails
fn walk_predicate_clause(
&mut self,
predicate: &mut PredicateClause<'script, ImutExprInt<'script>>,
) -> Result<()> {
self.walk_match_patterns(&mut predicate.pattern)?;
if let Some(guard) = &mut predicate.guard {
self.walk_expr(guard)?;
}
for expr in &mut predicate.exprs {
self.walk_expr(expr)?;
}
self.walk_expr(&mut predicate.last_expr)?;
Ok(())
}
/// Walks a clause group
///
/// # Errors
/// if the walker function fails
fn walk_clause_group(
&mut self,
group: &mut ClauseGroup<'script, ImutExprInt<'script>>,
) -> Result<()> {
match group {
ClauseGroup::Single {
precondition,
pattern,
} => {
if let Some(precondition) = precondition {
self.walk_precondition(precondition)?;
}
self.walk_predicate_clause(pattern)
}
ClauseGroup::Simple {
precondition,
patterns,
} => {
if let Some(precondition) = precondition {
self.walk_precondition(precondition)?;
}
for predicate in patterns {
self.walk_predicate_clause(predicate)?;
}
Ok(())
}
ClauseGroup::SearchTree {
precondition,
tree,
rest,
} => {
if let Some(precondition) = precondition {
self.walk_precondition(precondition)?;
}
for (_v, (es, e)) in tree.iter_mut() {
for e in es {
self.walk_expr(e)?;
}
self.walk_expr(e)?;
}
for predicate in rest {
self.walk_predicate_clause(predicate)?;
}
Ok(())
}
ClauseGroup::Combined {
precondition,
groups,
} => {
if let Some(precondition) = precondition {
self.walk_precondition(precondition)?;
}
for g in groups {
self.walk_clause_group(g)?;
}
Ok(())
}
}
}
/// walk match patterns
///
/// # Errors
/// if the walker function fails
fn walk_match_patterns(&mut self, pattern: &mut Pattern<'script>) -> Result<()> {
match pattern {
Pattern::Record(record_pat) => {
self.walk_record_pattern(record_pat)?;
}
Pattern::Array(array_pat) => {
self.walk_array_pattern(array_pat)?;
}
Pattern::Expr(expr) => {
self.walk_expr(expr)?;
}
Pattern::Assign(assign_pattern) => {
self.walk_match_patterns(assign_pattern.pattern.as_mut())?;
}
Pattern::Tuple(tuple_pattern) => {
for elem in &mut tuple_pattern.exprs {
match elem {
ArrayPredicatePattern::Expr(expr) => {
self.walk_expr(expr)?;
}
ArrayPredicatePattern::Record(record_pattern) => {
self.walk_record_pattern(record_pattern)?;
}
_ => {}
}
}
}
_ => {}
}
Ok(())
}
/// walk a record pattern
///
/// # Errors
/// if the walker function fails
fn walk_record_pattern(&mut self, record_pattern: &mut RecordPattern<'script>) -> Result<()> {
for field in &mut record_pattern.fields {
match field {
PredicatePattern::RecordPatternEq { pattern, .. } => {
self.walk_record_pattern(pattern)?;
}
PredicatePattern::Bin { rhs, .. } => {
self.walk_expr(rhs)?;
}
PredicatePattern::ArrayPatternEq { pattern, .. } => {
self.walk_array_pattern(pattern)?;
}
_ => {}
}
}
Ok(())
}
/// walk an array pattern
///
/// # Errors
/// if the walker function fails
fn walk_array_pattern(&mut self, array_pattern: &mut ArrayPattern<'script>) -> Result<()> {
for elem in &mut array_pattern.exprs {
match elem {
ArrayPredicatePattern::Expr(expr) => {
self.walk_expr(expr)?;
}
ArrayPredicatePattern::Record(record_pattern) => {
self.walk_record_pattern(record_pattern)?;
}
_ => {}
}
}
Ok(())
}
/// visit a comprehension
///
/// # Errors
/// if the walker function fails
fn visit_comprehension(
&mut self,
_comp: &mut Comprehension<'script, ImutExprInt<'script>>,
) -> Result<VisitRes> {
Ok(Walk)
}
/// walk a comprehension
///
/// # Errors
/// if the walker function fails
fn walk_comprehension(
&mut self,
comp: &mut Comprehension<'script, ImutExprInt<'script>>,
) -> Result<()> {
self.walk_expr(&mut comp.target)?;
for comp_case in &mut comp.cases {
if let Some(guard) = &mut comp_case.guard {
self.walk_expr(guard)?;
}
for expr in &mut comp_case.exprs {
self.walk_expr(expr)?;
}
self.walk_expr(&mut comp_case.last_expr)?;
}
Ok(())
}
/// visit a merge expr
///
/// # Errors
/// if the walker function fails
fn visit_merge(&mut self, _merge: &mut Merge<'script>) -> Result<VisitRes> {
Ok(Walk)
}
/// walk a merge expr
///
/// # Errors
/// if the walker function fails
fn walk_merge(&mut self, merge: &mut Merge<'script>) -> Result<()> {
self.walk_expr(&mut merge.target)?;
self.walk_expr(&mut merge.expr)
}
/// walk a path segment
///
/// # Errors
/// if the walker function fails
fn walk_segment(&mut self, segment: &mut Segment<'script>) -> Result<()> {
match segment {
Segment::Element { expr, .. } => self.walk_expr(expr),
Segment::Range {
range_start,
range_end,
..
} => {
self.walk_expr(range_start.as_mut())?;
self.walk_expr(range_end.as_mut())
}
_ => Ok(()),
}
}
/// visit a path
///
/// # Errors
/// if the walker function fails
fn visit_path(&mut self, _path: &mut Path<'script>) -> Result<VisitRes> {
Ok(Walk)
}
/// walk a path
///
/// # Errors
/// if the walker function fails
fn walk_path(&mut self, path: &mut Path<'script>) -> Result<()> {
if let Path::Expr(ExprPath { expr, .. }) = path {
self.walk_expr(expr)?;
}
let segments = match path {
Path::Const(LocalPath { segments, .. })
| Path::Local(LocalPath { segments, .. })
| Path::Event(EventPath { segments, .. })
| Path::State(StatePath { segments, .. })
| Path::Meta(MetadataPath { segments, .. })
| Path::Expr(ExprPath { segments, .. })
| Path::Reserved(
ReservedPath::Args { segments, .. }
| ReservedPath::Group { segments, .. }
| ReservedPath::Window { segments, .. },
) => segments,
};
for segment in segments {
self.walk_segment(segment)?;
}
Ok(())
}
/// visit a string
///
/// # Errors
/// if the walker function fails
fn visit_string(&mut self, _string: &mut StringLit<'script>) -> Result<VisitRes> {
Ok(Walk)
}
/// walk a string
///
/// # Errors
/// if the walker function fails
fn walk_string(&mut self, string: &mut StringLit<'script>) -> Result<()> {
for element in &mut string.elements {
if let StrLitElement::Expr(expr) = element {
self.walk_expr(expr)?;
}
}
Ok(())
}
/// visit a local
///
/// # Errors
/// if the walker function fails
fn visit_local(&mut self, _local_idx: &mut usize) -> Result<VisitRes> {
Ok(Walk)
}
/// visit a present expr
///
/// # Errors
/// if the walker function fails
fn visit_present(&mut self, _path: &mut Path<'script>) -> Result<VisitRes> {
Ok(Walk)
}
/// visit an invoke expr
///
/// # Errors
/// if the walker function fails
fn visit_invoke(&mut self, _invoke: &mut Invoke<'script>) -> Result<VisitRes> {
Ok(Walk)
}
/// walk an invoke expr
///
/// # Errors
/// if the walker function fails
fn walk_invoke(&mut self, invoke: &mut Invoke<'script>) -> Result<()> {
for arg in &mut invoke.args {
self.walk_expr(&mut arg.0)?;
}
Ok(())
}
/// visit an invoke1 expr
///
/// # Errors
/// if the walker function fails
fn visit_invoke1(&mut self, _invoke: &mut Invoke<'script>) -> Result<VisitRes> {
Ok(Walk)
}
/// visit an invoke2 expr
///
/// # Errors
/// if the walker function fails
fn visit_invoke2(&mut self, _invoke: &mut Invoke<'script>) -> Result<VisitRes> {
Ok(Walk)
}
/// visit an invoke3 expr
///
/// # Errors
/// if the walker function fails
fn visit_invoke3(&mut self, _invoke: &mut Invoke<'script>) -> Result<VisitRes> {
Ok(Walk)
}
/// visit an `invoke_aggr` expr
///
/// # Errors
/// if the walker function fails
fn visit_invoke_aggr(&mut self, _invoke_aggr: &mut InvokeAggr) -> Result<VisitRes> {
Ok(Walk)
}
/// visit a recur expr
///
/// # Errors
/// if the walker function fails
fn visit_recur(&mut self, _recur: &mut Recur<'script>) -> Result<VisitRes> {
Ok(Walk)
}
/// walk a recur expr
///
/// # Errors
/// if the walker function fails
fn walk_recur(&mut self, recur: &mut Recur<'script>) -> Result<()> {
for expr in &mut recur.exprs {
self.walk_expr(&mut expr.0)?;
}
Ok(())
}
/// visit bytes
///
/// # Errors
/// if the walker function fails
fn visit_bytes(&mut self, _bytes: &mut Bytes<'script>) -> Result<VisitRes> {
Ok(Walk)
}
/// walk bytes
/// # Errors
/// if the walker function fails
fn walk_bytes(&mut self, bytes: &mut Bytes<'script>) -> Result<()> {
for part in &mut bytes.value {
self.walk_expr(&mut part.data.0)?;
}
Ok(())
}
/// visit a literal
///
/// # Errors
/// if the walker function fails
fn visit_literal(&mut self, _literal: &mut Literal<'script>) -> Result<VisitRes> {
Ok(Walk)
}
/// visit a generic `ImutExprInt` (this is called before the concrete `visit_*` method)
///
/// # Errors
/// if the walker function fails
fn visit_expr(&mut self, _e: &mut ImutExprInt<'script>) -> Result<VisitRes> {
Ok(Walk)
}
/// entry point into this visitor - call this to start visiting the given expression `e`
///
/// # Errors
/// if the walker function fails
fn walk_expr(&mut self, e: &mut ImutExprInt<'script>) -> Result<()> {
if let Walk = self.visit_expr(e)? {
match e {
ImutExprInt::Record(record) => {
if let Walk = self.visit_record(record)? {
self.walk_record(record)?;
}
}
ImutExprInt::List(list) => {
if let Walk = self.visit_list(list)? {
self.walk_list(list)?;
}
}
ImutExprInt::Binary(binary) => {
if let Walk = self.visit_binary(binary.as_mut())? {
self.walk_binary(binary.as_mut())?;
}
}
ImutExprInt::Unary(unary) => {
if let Walk = self.visit_unary(unary.as_mut())? {
self.walk_unary(unary.as_mut())?;
}
}
ImutExprInt::Patch(patch) => {
if let Walk = self.visit_patch(patch.as_mut())? {
self.walk_patch(patch.as_mut())?;
}
}
ImutExprInt::Match(mmatch) => {
if let Walk = self.visit_match(mmatch.as_mut())? {
self.walk_match(mmatch.as_mut())?;
}
}
ImutExprInt::Comprehension(comp) => {
if let Walk = self.visit_comprehension(comp.as_mut())? {
self.walk_comprehension(comp.as_mut())?;
}
}
ImutExprInt::Merge(merge) => {
if let Walk = self.visit_merge(merge.as_mut())? {
self.walk_merge(merge.as_mut())?;
}
}
ImutExprInt::Path(path) => {
if let Walk = self.visit_path(path)? {
self.walk_path(path)?;
}
}
ImutExprInt::String(string) => {
if let Walk = self.visit_string(string)? {
self.walk_string(string)?;
}
}
ImutExprInt::Local { idx, .. } => {
self.visit_local(idx)?;
}
ImutExprInt::Present { path, .. } => {
if let Walk = self.visit_present(path)? {
self.walk_path(path)?;
}
}
ImutExprInt::Invoke(invoke) => {
if let Walk = self.visit_invoke(invoke)? {
self.walk_invoke(invoke)?;
}
}
ImutExprInt::Invoke1(invoke1) => {
if let Walk = self.visit_invoke1(invoke1)? {
self.walk_invoke(invoke1)?;
}
}
ImutExprInt::Invoke2(invoke2) => {
if let Walk = self.visit_invoke2(invoke2)? {
self.walk_invoke(invoke2)?;
}
}
ImutExprInt::Invoke3(invoke3) => {
if let Walk = self.visit_invoke3(invoke3)? {
self.walk_invoke(invoke3)?;
}
}
ImutExprInt::InvokeAggr(invoke_aggr) => {
self.visit_invoke_aggr(invoke_aggr)?;
}
ImutExprInt::Recur(recur) => {
if let Walk = self.visit_recur(recur)? {
self.walk_recur(recur)?;
}
}
ImutExprInt::Bytes(bytes) => {
if let Walk = self.visit_bytes(bytes)? {
self.walk_bytes(bytes)?;
}
}
ImutExprInt::Literal(lit) => {
self.visit_literal(lit)?;
}
}
}
Ok(())
}
}
pub(crate) trait GroupByVisitor<'script> {
fn visit_expr(&mut self, expr: &ImutExprInt<'script>);
fn walk_group_by(&mut self, group_by: &GroupByInt<'script>) {
match group_by {
GroupByInt::Expr { expr, .. } | GroupByInt::Each { expr, .. } => self.visit_expr(expr),
GroupByInt::Set { items, .. } => {
for inner_group_by in items {
self.walk_group_by(&inner_group_by.0);
}
}
}
}
}
/// Analyzes the select target expr for references to the event outside of an aggregate function
/// and rewrites what we can to group references.
///
/// At a later stage we will only allow expressions with event references if they are
/// also in the group by clause - so we can simply rewrite those to reference `group` and thus don't need to copy.
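///
/// For example (a sketch, in tremor query pseudocode):
///
/// ```text
/// select event.foo from in group by event.foo into out
/// #      ^^^^^^^^^ matches group expression 0, rewritten to `group[0]`
/// ```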
pub(crate) struct TargetEventRefVisitor<'script, 'meta> {
rewritten: bool,
meta: &'meta NodeMetas,
group_expressions: Vec<ImutExprInt<'script>>,
}
impl<'script, 'meta> TargetEventRefVisitor<'script, 'meta> {
pub(crate) fn new(
group_expressions: Vec<ImutExprInt<'script>>,
meta: &'meta NodeMetas,
) -> Self {
Self {
rewritten: false,
meta,
group_expressions,
}
}
pub(crate) fn rewrite_target(&mut self, target: &mut ImutExprInt<'script>) -> Result<bool> {
self.walk_expr(target)?;
Ok(self.rewritten)
}
}
impl<'script, 'meta> ImutExprIntVisitor<'script> for TargetEventRefVisitor<'script, 'meta> {
fn visit_expr(&mut self, e: &mut ImutExprInt<'script>) -> Result<VisitRes> {
for (idx, group_expr) in self.group_expressions.iter().enumerate() {
// check if we have an equivalent expression :)
if e.ast_eq(group_expr) {
// rewrite it:
*e = ImutExprInt::Path(Path::Reserved(crate::ast::ReservedPath::Group {
mid: e.mid(),
segments: vec![crate::ast::Segment::Idx { mid: e.mid(), idx }],
}));
self.rewritten = true;
// we do not need to visit this expression further, we already replaced it.
return Ok(VisitRes::Stop);
}
}
Ok(VisitRes::Walk)
}
fn visit_path(&mut self, path: &mut Path<'script>) -> Result<VisitRes> {
match path {
// these are the only exprs that can get a hold of the event payload or its metadata
Path::Event(_) | Path::Meta(_) => {
// fail if we see an event or meta ref in the select target
return error_event_ref_not_allowed(path, path, self.meta);
}
_ => {}
}
Ok(VisitRes::Walk)
}
}
pub(crate) struct GroupByExprExtractor<'script> {
pub(crate) expressions: Vec<ImutExprInt<'script>>,
}
impl<'script> GroupByExprExtractor<'script> {
pub(crate) fn new() -> Self {
Self {
expressions: vec![],
}
}
pub(crate) fn extract_expressions(&mut self, group_by: &GroupBy<'script>) {
self.walk_group_by(&group_by.0);
}
}
impl<'script> GroupByVisitor<'script> for GroupByExprExtractor<'script> {
fn visit_expr(&mut self, expr: &ImutExprInt<'script>) {
self.expressions.push(expr.clone()); // clone is required here to satisfy the lifetimes
}
}
pub(crate) struct ArgsRewriter<'script, 'registry, 'meta> {
args: ImutExprInt<'script>,
helper: &'meta mut Helper<'script, 'registry>,
}
impl<'script, 'registry, 'meta> ArgsRewriter<'script, 'registry, 'meta> {
pub(crate) fn new(args: Value<'script>, helper: &'meta mut Helper<'script, 'registry>) -> Self {
let args: ImutExpr = Literal {
mid: 0,
value: args,
}
.into();
Self {
args: args.0,
helper,
}
}
pub(crate) fn rewrite_expr(&mut self, expr: &mut ImutExprInt<'script>) -> Result<()> {
self.walk_expr(expr)?;
Ok(())
}
pub(crate) fn rewrite_group_by(&mut self, group_by: &mut GroupByInt<'script>) -> Result<()> {
match group_by {
GroupByInt::Expr { expr, .. } | GroupByInt::Each { expr, .. } => {
self.rewrite_expr(expr)?;
}
GroupByInt::Set { items, .. } => {
for inner_group_by in items {
self.rewrite_group_by(&mut inner_group_by.0)?;
}
}
}
Ok(())
}
}
impl<'script, 'registry, 'meta> ImutExprIntVisitor<'script>
for ArgsRewriter<'script, 'registry, 'meta>
{
fn visit_path(&mut self, path: &mut Path<'script>) -> Result<VisitRes> {
if let Path::Reserved(ReservedPath::Args { segments, mid }) = path {
let new = ExprPath {
expr: Box::new(self.args.clone()),
segments: segments.clone(),
mid: *mid,
var: self.helper.reserve_shadow(),
};
*path = Path::Expr(new);
self.helper.end_shadow_var();
}
Ok(VisitRes::Walk)
}
}
pub(crate) struct ExprReducer<'script, 'registry, 'meta> {
visits: u64,
helper: &'meta mut Helper<'script, 'registry>,
}
impl<'script, 'registry, 'meta> ExprReducer<'script, 'registry, 'meta> {
pub(crate) fn new(helper: &'meta mut Helper<'script, 'registry>) -> Self {
Self { helper, visits: 0 }
}
pub(crate) fn reduce(&mut self, expr: &'meta mut ImutExprInt<'script>) -> Result<()> {
// Counts the number of visits needed to walk the expr.
self.walk_expr(expr)?;
// TODO: This is slow
let loops = self.visits;
for _ in 1..loops {
self.walk_expr(expr)?;
}
Ok(())
}
}
impl<'script, 'registry, 'meta> ImutExprIntVisitor<'script>
for ExprReducer<'script, 'registry, 'meta>
{
fn visit_expr(&mut self, e: &mut ImutExprInt<'script>) -> Result<VisitRes> {
self.visits += 1;
*e = e.clone().try_reduce(self.helper)?;
Ok(VisitRes::Walk)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::ast::Expr;
use crate::errors::Result;
use crate::path::ModulePath;
use crate::registry::registry;
use simd_json::prelude::*;
#[derive(Default)]
struct Find42Visitor {
found: usize,
}
impl<'script> ImutExprIntVisitor<'script> for Find42Visitor {
fn visit_literal(&mut self, literal: &mut Literal<'script>) -> Result<VisitRes> {
if let Some(42) = literal.value.as_u64() {
self.found += 1;
return Ok(VisitRes::Stop);
}
Ok(VisitRes::Walk)
}
}
fn test_walk<'script>(script: &'script str, expected_42s: usize) -> Result<()> {
let module_path = ModulePath::load();
let mut registry = registry();
crate::std_lib::load(&mut registry);
let script_script: crate::script::Script =
crate::script::Script::parse(&module_path, "test", script.to_owned(), ®istry)?;
let script: &crate::ast::Script = script_script.script.suffix();
let mut imut_expr = script
.exprs
.iter()
.filter_map(|e| {
if let Expr::Imut(expr) = e {
Some(expr)
} else {
None
}
})
.cloned()
.last()
.unwrap()
.into_static();
let mut visitor = Find42Visitor::default();
visitor.walk_expr(&mut imut_expr)?;
assert_eq!(
expected_42s, visitor.found,
"Did not find {} 42s in {:?}, only {}",
expected_42s, imut_expr, visitor.found
);
Ok(())
}
#[test]
fn test_visitor_walking() -> Result<()> {
test_walk(
r#"
fn hide_the_42(x) with
x + 1
end;
hide_the_42(
match event.foo of
case %{ field == " #{42 + event.foo} ", present foo, absent bar } => event.bar
case %[42] => event.snake
case a = %(42, ...) => a
default => event.snot
end
);
"#,
3,
)
}
#[test]
fn test_walk_list() -> Result<()> {
test_walk(
r#"
let x = event.bla + 1;
fn add(x, y) with
recur(x + y, 1)
end;
let zero = 0;
[
-event.foo,
(patch event of
insert "snot" => 42,
merge => {"snot": 42 - zero},
merge "badger" => {"snot": 42 - zero},
upsert "snot" => 42,
copy "snot" => "snotter",
erase "snotter"
end),
(merge event of {"foo": event[42:x]} end),
"~~~ #{ state[1] } ~~~",
x,
x[x],
add(event.foo, 42),
<<event.foo:8/unsigned>>
]
"#,
6,
)
}
#[test]
fn test_walk_comprehension() -> Result<()> {
test_walk(
r#"
(for group[0] of
case (i, e) =>
42 + i
end
)
"#,
1,
)
}
#[test]
fn test_group_expr_extractor() -> Result<()> {
let mut visitor = GroupByExprExtractor::new();
let lit_42 = ImutExprInt::Literal(Literal {
mid: 3,
value: tremor_value::Value::from(42),
});
let false_array = ImutExprInt::List(List {
mid: 5,
exprs: vec![crate::ast::ImutExpr(ImutExprInt::Literal(Literal {
mid: 6,
value: tremor_value::Value::from(false),
}))],
});
let group_by = GroupBy(GroupByInt::Set {
mid: 1,
items: vec![
GroupBy(GroupByInt::Expr {
mid: 2,
expr: lit_42.clone(),
}),
GroupBy(GroupByInt::Each {
mid: 4,
expr: false_array.clone(),
}),
],
});
visitor.extract_expressions(&group_by);
assert_eq!(2, visitor.expressions.len());
assert_eq!(&[lit_42, false_array], visitor.expressions.as_slice());
Ok(())
}
}
| 31.725933 | 116 | 0.495402 |
290d816a68484d41954e94d935deeacda1b6847f | 1,696 | use crate::{MobcError::*, Result, REDIS_CONN_STRING};
use mobc::{Connection, Pool};
use mobc_redis::redis::{AsyncCommands, FromRedisValue};
use mobc_redis::{redis, RedisConnectionManager};
use std::time::Duration;
pub type MobcPool = Pool<RedisConnectionManager>;
pub type MobcCon = Connection<RedisConnectionManager>;
const CACHE_POOL_MAX_OPEN: u64 = 16;
const CACHE_POOL_MAX_IDLE: u64 = 8;
const CACHE_POOL_TIMEOUT_SECONDS: u64 = 1;
const CACHE_POOL_EXPIRE_SECONDS: u64 = 60;
pub async fn connect() -> Result<MobcPool> {
let client = redis::Client::open(REDIS_CONN_STRING).map_err(RedisClientError)?;
let manager = RedisConnectionManager::new(client);
Ok(Pool::builder()
.get_timeout(Some(Duration::from_secs(CACHE_POOL_TIMEOUT_SECONDS)))
.max_open(CACHE_POOL_MAX_OPEN)
.max_idle(CACHE_POOL_MAX_IDLE)
.max_lifetime(Some(Duration::from_secs(CACHE_POOL_EXPIRE_SECONDS)))
.build(manager))
}
async fn get_con(pool: &MobcPool) -> Result<MobcCon> {
pool.get().await.map_err(|e| {
eprintln!("error connecting to redis: {}", e);
RedisPoolError(e).into()
})
}
pub async fn set_str(pool: &MobcPool, key: &str, value: &str, ttl_seconds: usize) -> Result<()> {
let mut con = get_con(&pool).await?;
con.set(key, value).await.map_err(RedisCMDError)?;
if ttl_seconds > 0 {
con.expire(key, ttl_seconds).await.map_err(RedisCMDError)?;
}
Ok(())
}
pub async fn get_str(pool: &MobcPool, key: &str) -> Result<String> {
let mut con = get_con(&pool).await?;
let value = con.get(key).await.map_err(RedisCMDError)?;
FromRedisValue::from_redis_value(&value).map_err(|e| RedisTypeError(e).into())
}
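/// A minimal usage sketch (added for illustration): assumes a Tokio runtime and a
/// Redis instance reachable at `REDIS_CONN_STRING`.
#[allow(dead_code)]
async fn usage_example() -> Result<()> {
    let pool = connect().await?;
    // Cache a value for 60 seconds, then read it back.
    set_str(&pool, "greeting", "hello", 60).await?;
    assert_eq!(get_str(&pool, "greeting").await?, "hello");
    Ok(())
}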
| 36.085106 | 97 | 0.699882 |
de92292ba803ede03f5998903dc1b1a983c459cd | 5,358 | //! The "garbage collector" looks for objects that have no owner
//! and collects them. The intent is that "compiled dada" would not
//! have a gc, but that it would be equivalent to the interpreter.
//!
//! The gc currently runs after every step, keeping things tidy.
use dada_collections::Set;
use dada_ir::storage_mode::Leased;
use crate::machine::{op::MachineOp, Object, ObjectData, Permission, PermissionData, Value};
use super::Stepper;
impl Stepper<'_> {
/// Garbage collector: removes any objects that do not have an owner.
/// Removes any permissions that do not appear in a live spot.
///
/// Note: this relies on the Dada permission system for correctness.
/// For example, if you have a lease on an object that is then
/// freed, we assume that this lease would be revoked (and thus you would
/// have an expired permission).
#[tracing::instrument(level = "Debug", skip(self))]
pub(super) fn gc(&mut self, in_flight_values: &[Value]) {
let mut marks = Marks::default();
Marker::new(self.machine, &mut marks).mark(in_flight_values);
self.sweep(&marks);
}
}
#[derive(Debug, Default)]
struct Marks {
live_objects: Set<Object>,
live_permissions: Set<Permission>,
}
struct Marker<'me> {
machine: &'me dyn MachineOp,
marks: &'me mut Marks,
}
impl<'me> Marker<'me> {
fn new(machine: &'me dyn MachineOp, marks: &'me mut Marks) -> Self {
Self { machine, marks }
}
#[tracing::instrument(level = "Debug", skip(self))]
fn mark(&mut self, in_flight_values: &[Value]) {
for frame in self.machine.frames() {
for local_value in &frame.locals {
self.mark_value(*local_value);
}
}
for in_flight_value in in_flight_values {
self.mark_value(*in_flight_value);
}
// the singleton unit object is always live :)
self.marks.live_objects.insert(self.machine.unit_object());
}
#[tracing::instrument(level = "Debug", skip(self))]
fn mark_values(&mut self, values: &[Value]) {
for value in values {
self.mark_value(*value);
}
}
/// Marks a value that is reachable from something live (i.e., a root value).
fn mark_value(&mut self, value: Value) {
// The *permission* lives in a live spot, therefore it is live.
// This also keeps "expired" permissions live.
self.marks.live_permissions.insert(value.permission);
let PermissionData::Valid(valid) = &self.machine[value.permission] else {
tracing::debug!("marking expired permission but skipping object: {:?}", value);
return;
};
if let Leased::Yes = valid.leased {
// a lease alone isn't enough to keep data alive
tracing::trace!("skipping leased value: {:?} valid={:?}", value, valid);
return;
}
if !self.marks.live_objects.insert(value.object) {
// already visited
tracing::trace!("skipping already visited object: {:?}", value);
return;
}
tracing::debug!("marking value: {:?}", value);
let object_data: &ObjectData = &self.machine[value.object];
match object_data {
ObjectData::Instance(i) => self.mark_values(&i.fields),
ObjectData::ThunkFn(f) => self.mark_values(&f.arguments),
ObjectData::ThunkRust(f) => self.mark_values(&f.arguments),
ObjectData::Tuple(t) => self.mark_values(&t.fields),
ObjectData::Class(_)
| ObjectData::Function(_)
| ObjectData::Intrinsic(_)
| ObjectData::Bool(_)
| ObjectData::Uint(_)
| ObjectData::Int(_)
| ObjectData::Float(_)
| ObjectData::String(_)
| ObjectData::Unit(_) => {
// no reachable data
}
}
}
}
impl Stepper<'_> {
#[tracing::instrument(level = "Debug", skip(self))]
fn sweep(&mut self, marks: &Marks) {
let mut live_permissions = self.machine.all_permissions();
let mut dead_permissions = live_permissions.clone();
live_permissions.retain(|p| marks.live_permissions.contains(p));
dead_permissions.retain(|p| !marks.live_permissions.contains(p));
// First: revoke all the dead permissions.
for &p in &dead_permissions {
tracing::debug!("revoking dead permission {:?}", p);
self.revoke(p);
}
// Next: remove them from the heap.
for &p in &dead_permissions {
let data = self.machine.take_permission(p);
tracing::debug!("removed dead permission {:?} = {:?}", p, data);
}
// Next: for each *live* permission, remove any dead tenants.
for &p in &live_permissions {
if let PermissionData::Valid(valid) = &mut self.machine[p] {
valid.tenants.retain(|p| marks.live_permissions.contains(p));
}
}
// Finally: remove dead objects.
let mut dead_objects = self.machine.all_objects();
dead_objects.retain(|o| !marks.live_objects.contains(o));
for &o in &dead_objects {
let data = self.machine.take_object(o);
tracing::debug!("freeing {:?}: {:?}", o, data);
}
}
}
| 35.25 | 91 | 0.593505 |
3a8c6ffc9b1471b3f1e303935c8c1fbb40a15e31 | 69,856 | //! Solc artifact types
use ethers_core::abi::Abi;
use colored::Colorize;
use md5::Digest;
use semver::{Version, VersionReq};
use std::{
collections::{BTreeMap, HashSet},
fmt, fs,
path::{Path, PathBuf},
str::FromStr,
};
use crate::{
compile::*, error::SolcIoError, remappings::Remapping, utils, ProjectPathsConfig, SolcError,
};
use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer};
use tracing::warn;
pub mod ast;
pub use ast::*;
pub mod bytecode;
pub mod contract;
pub mod output_selection;
pub mod serde_helpers;
use crate::{
artifacts::output_selection::{ContractOutputSelection, OutputSelection},
filter::FilteredSources,
};
pub use bytecode::*;
pub use contract::*;
pub use serde_helpers::{deserialize_bytes, deserialize_opt_bytes};
/// Solidity files are made up of multiple `source units`; a solidity contract is such a `source
/// unit`, so a solidity file can contain multiple contracts: a (1-N*) relationship.
///
/// This type represents this mapping as `file name -> (contract name -> T)`, where the generic is
/// intended to represent contract specific information, like [`Contract`] itself. See [`Contracts`].
pub type FileToContractsMap<T> = BTreeMap<String, BTreeMap<String, T>>;
/// file -> (contract name -> Contract)
pub type Contracts = FileToContractsMap<Contract>;
/// An ordered list of files and their source
pub type Sources = BTreeMap<PathBuf, Source>;
/// A set of different Solc installations with their version and the sources to be compiled
pub(crate) type VersionedSources = BTreeMap<Solc, (Version, Sources)>;
/// A set of different Solc installations with their version and the filtered sources to be compiled
pub(crate) type VersionedFilteredSources = BTreeMap<Solc, (Version, FilteredSources)>;
const SOLIDITY: &str = "Solidity";
/// Input type `solc` expects
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CompilerInput {
pub language: String,
pub sources: Sources,
pub settings: Settings,
}
impl CompilerInput {
/// Reads all contracts found under the path
pub fn new(path: impl AsRef<Path>) -> Result<Vec<Self>, SolcIoError> {
Source::read_all_from(path.as_ref()).map(Self::with_sources)
}
    /// Creates new [CompilerInput]s with default settings and the given sources
    ///
    /// A [CompilerInput] expects a language setting; solc supports Solidity and Yul.
    /// If the given `sources` mix Solidity and Yul files, two [CompilerInput]s are returned.
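    ///
    /// A sketch (assumes `Source { content }` construction; the paths are
    /// hypothetical):
    ///
    /// ```ignore
    /// use std::collections::BTreeMap;
    /// let mut sources: Sources = BTreeMap::new();
    /// sources.insert("A.sol".into(), Source { content: "contract A {}".to_string() });
    /// sources.insert("B.yul".into(), Source { content: "{}".to_string() });
    /// // One input per language: [Solidity, Yul].
    /// assert_eq!(CompilerInput::with_sources(sources).len(), 2);
    /// ```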
pub fn with_sources(sources: Sources) -> Vec<Self> {
let mut solidity_sources = BTreeMap::new();
let mut yul_sources = BTreeMap::new();
for (path, source) in sources {
if path.extension() == Some(std::ffi::OsStr::new("yul")) {
yul_sources.insert(path, source);
} else {
solidity_sources.insert(path, source);
}
}
let mut res = Vec::new();
if !solidity_sources.is_empty() {
res.push(Self {
language: SOLIDITY.to_string(),
sources: solidity_sources,
settings: Default::default(),
});
}
if !yul_sources.is_empty() {
res.push(Self {
language: "Yul".to_string(),
sources: yul_sources,
settings: Default::default(),
});
}
res
}
/// This will remove/adjust values in the `CompilerInput` that are not compatible with this
/// version
pub fn sanitized(mut self, version: &Version) -> Self {
static PRE_V0_6_0: once_cell::sync::Lazy<VersionReq> =
once_cell::sync::Lazy::new(|| VersionReq::parse("<0.6.0").unwrap());
static PRE_V0_8_10: once_cell::sync::Lazy<VersionReq> =
once_cell::sync::Lazy::new(|| VersionReq::parse("<0.8.10").unwrap());
static PRE_V0_7_5: once_cell::sync::Lazy<VersionReq> =
once_cell::sync::Lazy::new(|| VersionReq::parse("<0.7.5").unwrap());
if PRE_V0_6_0.matches(version) {
if let Some(ref mut meta) = self.settings.metadata {
// introduced in <https://docs.soliditylang.org/en/v0.6.0/using-the-compiler.html#compiler-api>
// missing in <https://docs.soliditylang.org/en/v0.5.17/using-the-compiler.html#compiler-api>
meta.bytecode_hash.take();
}
// introduced in <https://docs.soliditylang.org/en/v0.6.0/using-the-compiler.html#compiler-api>
let _ = self.settings.debug.take();
}
if PRE_V0_8_10.matches(version) {
if let Some(ref mut debug) = self.settings.debug {
// introduced in <https://docs.soliditylang.org/en/v0.8.10/using-the-compiler.html#compiler-api>
// <https://github.com/ethereum/solidity/releases/tag/v0.8.10>
debug.debug_info.clear();
}
// 0.8.10 is the earliest version that has all model checker options.
self.settings.model_checker = None;
}
if PRE_V0_7_5.matches(version) {
// introduced in 0.7.5 <https://github.com/ethereum/solidity/releases/tag/v0.7.5>
self.settings.via_ir.take();
}
self
}
/// Sets the settings for compilation
#[must_use]
pub fn settings(mut self, settings: Settings) -> Self {
self.settings = settings;
self
}
/// Sets the EVM version for compilation
#[must_use]
pub fn evm_version(mut self, version: EvmVersion) -> Self {
self.settings.evm_version = Some(version);
self
}
/// Sets the optimizer runs (default = 200)
#[must_use]
pub fn optimizer(mut self, runs: usize) -> Self {
self.settings.optimizer.runs(runs);
self
}
/// Normalizes the EVM version used in the settings to be up to the latest one
/// supported by the provided compiler version.
#[must_use]
pub fn normalize_evm_version(mut self, version: &Version) -> Self {
if let Some(ref mut evm_version) = self.settings.evm_version {
self.settings.evm_version = evm_version.normalize_version(version);
}
self
}
#[must_use]
pub fn with_remappings(mut self, remappings: Vec<Remapping>) -> Self {
self.settings.remappings = remappings;
self
}
/// Sets the path of the source files to `root` adjoined to the existing path
#[must_use]
pub fn join_path(mut self, root: impl AsRef<Path>) -> Self {
let root = root.as_ref();
self.sources = self.sources.into_iter().map(|(path, s)| (root.join(path), s)).collect();
self
}
/// Removes the `base` path from all source files
pub fn strip_prefix(mut self, base: impl AsRef<Path>) -> Self {
let base = base.as_ref();
self.sources = self
.sources
.into_iter()
.map(|(path, s)| (path.strip_prefix(base).map(|p| p.to_path_buf()).unwrap_or(path), s))
.collect();
self
}
}
/// A `CompilerInput` representation used for verification
///
/// This type is an alternative `CompilerInput` that uses non-alphabetic ordering of the `sources`
/// and instead emits the (Path -> Source) pairs in the same order as they appear in the `sources`
/// `Vec`. A `Vec` is used over a map so we can determine the order in which etherscan will display
/// the verified contracts.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct StandardJsonCompilerInput {
pub language: String,
#[serde(with = "serde_helpers::tuple_vec_map")]
pub sources: Vec<(PathBuf, Source)>,
pub settings: Settings,
}
// === impl StandardJsonCompilerInput ===
impl StandardJsonCompilerInput {
pub fn new(sources: Vec<(PathBuf, Source)>, settings: Settings) -> Self {
Self { language: SOLIDITY.to_string(), sources, settings }
}
/// Normalizes the EVM version used in the settings to be up to the latest one
/// supported by the provided compiler version.
#[must_use]
pub fn normalize_evm_version(mut self, version: &Version) -> Self {
if let Some(ref mut evm_version) = self.settings.evm_version {
self.settings.evm_version = evm_version.normalize_version(version);
}
self
}
}
impl From<StandardJsonCompilerInput> for CompilerInput {
fn from(input: StandardJsonCompilerInput) -> Self {
let StandardJsonCompilerInput { language, sources, settings } = input;
CompilerInput { language, sources: sources.into_iter().collect(), settings }
}
}
impl From<CompilerInput> for StandardJsonCompilerInput {
fn from(input: CompilerInput) -> Self {
let CompilerInput { language, sources, settings } = input;
StandardJsonCompilerInput { language, sources: sources.into_iter().collect(), settings }
}
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Settings {
/// Stop compilation after the given stage.
/// since 0.8.11: only "parsing" is valid here
#[serde(default, skip_serializing_if = "Option::is_none")]
pub stop_after: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub remappings: Vec<Remapping>,
pub optimizer: Optimizer,
/// Model Checker options.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub model_checker: Option<ModelCheckerSettings>,
/// Metadata settings
#[serde(default, skip_serializing_if = "Option::is_none")]
pub metadata: Option<SettingsMetadata>,
/// This field can be used to select desired outputs based
/// on file and contract names.
/// If this field is omitted, then the compiler loads and does type
/// checking, but will not generate any outputs apart from errors.
#[serde(default)]
pub output_selection: OutputSelection,
#[serde(
default,
with = "serde_helpers::display_from_str_opt",
skip_serializing_if = "Option::is_none"
)]
pub evm_version: Option<EvmVersion>,
/// Change compilation pipeline to go through the Yul intermediate representation. This is
/// false by default.
#[serde(rename = "viaIR", default, skip_serializing_if = "Option::is_none")]
pub via_ir: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub debug: Option<DebuggingSettings>,
/// Addresses of the libraries. If not all libraries are given here,
/// it can result in unlinked objects whose output data is different.
///
/// The top level key is the name of the source file where the library is used.
/// If remappings are used, this source file should match the global path
/// after remappings were applied.
/// If this key is an empty string, that refers to a global level.
#[serde(default, skip_serializing_if = "Libraries::is_empty")]
pub libraries: Libraries,
}
impl Settings {
/// Creates a new `Settings` instance with the given `output_selection`
pub fn new(output_selection: impl Into<OutputSelection>) -> Self {
Self { output_selection: output_selection.into(), ..Default::default() }
}
/// Inserts a set of `ContractOutputSelection`
pub fn push_all(&mut self, settings: impl IntoIterator<Item = ContractOutputSelection>) {
for value in settings {
self.push_output_selection(value)
}
}
/// Inserts a set of `ContractOutputSelection`
#[must_use]
pub fn with_extra_output(
mut self,
settings: impl IntoIterator<Item = ContractOutputSelection>,
) -> Self {
for value in settings {
self.push_output_selection(value)
}
self
}
/// Inserts the value for all files and contracts
///
/// ```
/// use ethers_solc::artifacts::output_selection::ContractOutputSelection;
/// use ethers_solc::artifacts::Settings;
/// let mut selection = Settings::default();
/// selection.push_output_selection(ContractOutputSelection::Metadata);
/// ```
pub fn push_output_selection(&mut self, value: impl ToString) {
self.push_contract_output_selection("*", value)
}
    /// Inserts the `key` `value` pair into the `output_selection` for all files
///
/// If the `key` already exists, then the value is added to the existing list
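    ///
    /// A sketch, mirroring the example above but selecting the output for a
    /// single contract instead of the `*` wildcard:
    ///
    /// ```
    /// use ethers_solc::artifacts::output_selection::ContractOutputSelection;
    /// use ethers_solc::artifacts::Settings;
    /// let mut selection = Settings::default();
    /// selection.push_contract_output_selection("MyContract", ContractOutputSelection::Metadata);
    /// ```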
pub fn push_contract_output_selection(
&mut self,
contracts: impl Into<String>,
value: impl ToString,
) {
let value = value.to_string();
let values = self
.output_selection
.as_mut()
.entry("*".to_string())
.or_default()
.entry(contracts.into())
.or_default();
if !values.contains(&value) {
values.push(value)
}
}
/// Sets the value for all files and contracts
pub fn set_output_selection(&mut self, values: impl IntoIterator<Item = impl ToString>) {
self.set_contract_output_selection("*", values)
}
/// Sets the `key` to the `values` pair to the `output_selection` for all files
///
/// This will replace the existing values for `key` if they're present
pub fn set_contract_output_selection(
&mut self,
key: impl Into<String>,
values: impl IntoIterator<Item = impl ToString>,
) {
self.output_selection
.as_mut()
.entry("*".to_string())
.or_default()
.insert(key.into(), values.into_iter().map(|s| s.to_string()).collect());
}
    /// Sets the `viaIR` value
#[must_use]
pub fn set_via_ir(mut self, via_ir: bool) -> Self {
self.via_ir = Some(via_ir);
self
}
/// Enables `viaIR`
#[must_use]
pub fn with_via_ir(self) -> Self {
self.set_via_ir(true)
}
/// Adds `ast` to output
#[must_use]
pub fn with_ast(mut self) -> Self {
let output =
self.output_selection.as_mut().entry("*".to_string()).or_insert_with(BTreeMap::default);
output.insert("".to_string(), vec!["ast".to_string()]);
self
}
}
impl Default for Settings {
fn default() -> Self {
Self {
stop_after: None,
optimizer: Default::default(),
metadata: None,
output_selection: OutputSelection::default_output_selection(),
evm_version: Some(EvmVersion::default()),
via_ir: None,
debug: None,
libraries: Default::default(),
remappings: Default::default(),
model_checker: None,
}
.with_ast()
}
}
/// A wrapper type for all libraries in the form of `<file>:<lib>:<addr>`
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(transparent)]
pub struct Libraries {
    /// All libraries, `(file path -> (Lib name -> Address))`
pub libs: BTreeMap<PathBuf, BTreeMap<String, String>>,
}
// === impl Libraries ===
impl Libraries {
/// Parses all libraries in the form of
/// `<file>:<lib>:<addr>`
///
/// # Example
///
/// ```
/// use ethers_solc::artifacts::Libraries;
/// let libs = Libraries::parse(&[
/// "src/DssSpell.sol:DssExecLib:0xfD88CeE74f7D78697775aBDAE53f9Da1559728E4".to_string(),
/// ])
/// .unwrap();
/// ```
pub fn parse(libs: &[String]) -> Result<Self, SolcError> {
let mut libraries = BTreeMap::default();
for lib in libs {
let mut items = lib.split(':');
let file = items.next().ok_or_else(|| {
SolcError::msg(format!("failed to parse path to library file: {}", lib))
})?;
let lib = items
.next()
.ok_or_else(|| SolcError::msg(format!("failed to parse library name: {}", lib)))?;
let addr = items.next().ok_or_else(|| {
SolcError::msg(format!("failed to parse library address: {}", lib))
})?;
if items.next().is_some() {
return Err(SolcError::msg(format!(
"failed to parse, too many arguments passed: {}",
lib
)))
}
libraries
.entry(file.into())
.or_insert_with(BTreeMap::default)
.insert(lib.to_string(), addr.to_string());
}
Ok(Self { libs: libraries })
}
pub fn is_empty(&self) -> bool {
self.libs.is_empty()
}
pub fn len(&self) -> usize {
self.libs.len()
}
/// Solc expects the lib paths to match the global path after remappings were applied
///
/// See also [ProjectPathsConfig::resolve_import]
pub fn with_applied_remappings(mut self, config: &ProjectPathsConfig) -> Self {
self.libs = self
.libs
.into_iter()
.map(|(file, target)| {
let file = config.resolve_import(&config.root, &file).unwrap_or_else(|err| {
warn!(target: "libs", "Failed to resolve library `{}` for linking: {:?}", file.display(), err);
file
});
(file, target)
})
.collect();
self
}
}
impl From<BTreeMap<PathBuf, BTreeMap<String, String>>> for Libraries {
fn from(libs: BTreeMap<PathBuf, BTreeMap<String, String>>) -> Self {
Self { libs }
}
}
impl AsRef<BTreeMap<PathBuf, BTreeMap<String, String>>> for Libraries {
fn as_ref(&self) -> &BTreeMap<PathBuf, BTreeMap<String, String>> {
&self.libs
}
}
impl AsMut<BTreeMap<PathBuf, BTreeMap<String, String>>> for Libraries {
fn as_mut(&mut self) -> &mut BTreeMap<PathBuf, BTreeMap<String, String>> {
&mut self.libs
}
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Optimizer {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub runs: Option<usize>,
/// Switch optimizer components on or off in detail.
/// The "enabled" switch above provides two defaults which can be
/// tweaked here. If "details" is given, "enabled" can be omitted.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub details: Option<OptimizerDetails>,
}
impl Optimizer {
pub fn runs(&mut self, runs: usize) {
self.runs = Some(runs);
}
pub fn disable(&mut self) {
self.enabled.take();
}
pub fn enable(&mut self) {
self.enabled = Some(true)
}
}
impl Default for Optimizer {
fn default() -> Self {
Self { enabled: Some(false), runs: Some(200), details: None }
}
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct OptimizerDetails {
/// The peephole optimizer is always on if no details are given,
/// use details to switch it off.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub peephole: Option<bool>,
/// The inliner is always on if no details are given,
/// use details to switch it off.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub inliner: Option<bool>,
/// The unused jumpdest remover is always on if no details are given,
/// use details to switch it off.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub jumpdest_remover: Option<bool>,
/// Sometimes re-orders literals in commutative operations.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub order_literals: Option<bool>,
/// Removes duplicate code blocks
#[serde(default, skip_serializing_if = "Option::is_none")]
pub deduplicate: Option<bool>,
/// Common subexpression elimination, this is the most complicated step but
/// can also provide the largest gain.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub cse: Option<bool>,
/// Optimize representation of literal numbers and strings in code.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub constant_optimizer: Option<bool>,
/// The new Yul optimizer. Mostly operates on the code of ABI coder v2
/// and inline assembly.
/// It is activated together with the global optimizer setting
/// and can be deactivated here.
/// Before Solidity 0.6.0 it had to be activated through this switch.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub yul: Option<bool>,
/// Tuning options for the Yul optimizer.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub yul_details: Option<YulDetails>,
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct YulDetails {
/// Improve allocation of stack slots for variables, can free up stack slots early.
/// Activated by default if the Yul optimizer is activated.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub stack_allocation: Option<bool>,
/// Select optimization steps to be applied.
/// Optional, the optimizer will use the default sequence if omitted.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub optimizer_steps: Option<String>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum EvmVersion {
Homestead,
TangerineWhistle,
SpuriousDragon,
Byzantium,
Constantinople,
Petersburg,
Istanbul,
Berlin,
London,
}
impl Default for EvmVersion {
fn default() -> Self {
Self::London
}
}
impl EvmVersion {
    /// Normalizes this EVM version against the given solc `semver::Version`, returning the
    /// highest EVM version supported by that compiler, or `None` if the compiler predates
    /// the `evmVersion` setting (solc 0.4.21).
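    ///
    /// For example (a sketch; the exact cut-off versions are given by the solc release
    /// constants referenced in the body):
    ///
    /// ```ignore
    /// use semver::Version;
    /// // The `evmVersion` setting only exists since solc 0.4.21.
    /// let old: Version = "0.4.20".parse().unwrap();
    /// assert_eq!(EvmVersion::London.normalize_version(&old), None);
    /// // A post-London solc supports every EVM version unchanged.
    /// let new: Version = "0.8.13".parse().unwrap();
    /// assert_eq!(EvmVersion::London.normalize_version(&new), Some(EvmVersion::London));
    /// ```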
pub fn normalize_version(self, version: &Version) -> Option<EvmVersion> {
// the EVM version flag was only added at 0.4.21
// we work our way backwards
if version >= &CONSTANTINOPLE_SOLC {
// If the Solc is at least at london, it supports all EVM versions
Some(if version >= &LONDON_SOLC {
self
            // For all other cases, cap at the highest fork supported by
            // that compiler version
} else if version >= &BERLIN_SOLC && self >= EvmVersion::Berlin {
EvmVersion::Berlin
} else if version >= &ISTANBUL_SOLC && self >= EvmVersion::Istanbul {
EvmVersion::Istanbul
} else if version >= &PETERSBURG_SOLC && self >= EvmVersion::Petersburg {
EvmVersion::Petersburg
} else if self >= EvmVersion::Constantinople {
EvmVersion::Constantinople
} else {
self
})
} else {
None
}
}
}
impl fmt::Display for EvmVersion {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let string = match self {
EvmVersion::Homestead => "homestead",
EvmVersion::TangerineWhistle => "tangerineWhistle",
EvmVersion::SpuriousDragon => "spuriousDragon",
EvmVersion::Constantinople => "constantinople",
EvmVersion::Petersburg => "petersburg",
EvmVersion::Istanbul => "istanbul",
EvmVersion::Berlin => "berlin",
EvmVersion::London => "london",
EvmVersion::Byzantium => "byzantium",
};
write!(f, "{}", string)
}
}
impl FromStr for EvmVersion {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"homestead" => Ok(EvmVersion::Homestead),
"tangerineWhistle" => Ok(EvmVersion::TangerineWhistle),
"spuriousDragon" => Ok(EvmVersion::SpuriousDragon),
"constantinople" => Ok(EvmVersion::Constantinople),
"petersburg" => Ok(EvmVersion::Petersburg),
"istanbul" => Ok(EvmVersion::Istanbul),
"berlin" => Ok(EvmVersion::Berlin),
"london" => Ok(EvmVersion::London),
"byzantium" => Ok(EvmVersion::Byzantium),
s => Err(format!("Unknown evm version: {}", s)),
}
}
}
/// Debugging settings for solc
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct DebuggingSettings {
#[serde(
default,
with = "serde_helpers::display_from_str_opt",
skip_serializing_if = "Option::is_none"
)]
pub revert_strings: Option<RevertStrings>,
    /// How much extra debug information to include in comments in the produced EVM assembly and
/// Yul code.
/// Available components are:
// - `location`: Annotations of the form `@src <index>:<start>:<end>` indicating the location of
// the corresponding element in the original Solidity file, where:
// - `<index>` is the file index matching the `@use-src` annotation,
// - `<start>` is the index of the first byte at that location,
// - `<end>` is the index of the first byte after that location.
// - `snippet`: A single-line code snippet from the location indicated by `@src`. The snippet is
// quoted and follows the corresponding `@src` annotation.
// - `*`: Wildcard value that can be used to request everything.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub debug_info: Vec<String>,
}
/// How to treat revert (and require) reason strings.
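///
/// A small sketch of the string round-trip implemented by the `Display`/`FromStr`
/// pair below (illustrative):
///
/// ```
/// use ethers_solc::artifacts::RevertStrings;
/// let v: RevertStrings = "verboseDebug".parse().unwrap();
/// assert_eq!(v, RevertStrings::VerboseDebug);
/// assert_eq!(v.to_string(), "verboseDebug");
/// ```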
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum RevertStrings {
/// "default" does not inject compiler-generated revert strings and keeps user-supplied ones.
Default,
/// "strip" removes all revert strings (if possible, i.e. if literals are used) keeping
/// side-effects
Strip,
/// "debug" injects strings for compiler-generated internal reverts, implemented for ABI
/// encoders V1 and V2 for now.
Debug,
/// "verboseDebug" even appends further information to user-supplied revert strings (not yet
/// implemented)
VerboseDebug,
}
impl fmt::Display for RevertStrings {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let string = match self {
RevertStrings::Default => "default",
RevertStrings::Strip => "strip",
RevertStrings::Debug => "debug",
RevertStrings::VerboseDebug => "verboseDebug",
};
write!(f, "{}", string)
}
}
impl FromStr for RevertStrings {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"default" => Ok(RevertStrings::Default),
"strip" => Ok(RevertStrings::Strip),
"debug" => Ok(RevertStrings::Debug),
"verboseDebug" | "verbosedebug" => Ok(RevertStrings::VerboseDebug),
s => Err(format!("Unknown evm version: {}", s)),
}
}
}
impl Default for RevertStrings {
fn default() -> Self {
RevertStrings::Default
}
}
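/// Solc metadata settings, i.e. the `settings.metadata` object of the standard
/// JSON input.
///
/// Construction sketch using the `From<BytecodeHash>` impl below (illustrative):
///
/// ```
/// use ethers_solc::artifacts::{BytecodeHash, SettingsMetadata};
/// let meta = SettingsMetadata::from(BytecodeHash::None);
/// assert_eq!(meta.bytecode_hash, Some(BytecodeHash::None));
/// assert!(meta.use_literal_content.is_none());
/// ```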
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct SettingsMetadata {
/// Use only literal content and not URLs (false by default)
#[serde(default, rename = "useLiteralContent", skip_serializing_if = "Option::is_none")]
pub use_literal_content: Option<bool>,
/// Use the given hash method for the metadata hash that is appended to the bytecode.
/// The metadata hash can be removed from the bytecode via option "none".
/// The other options are "ipfs" and "bzzr1".
/// If the option is omitted, "ipfs" is used by default.
#[serde(
default,
rename = "bytecodeHash",
skip_serializing_if = "Option::is_none",
with = "serde_helpers::display_from_str_opt"
)]
pub bytecode_hash: Option<BytecodeHash>,
}
impl From<BytecodeHash> for SettingsMetadata {
fn from(hash: BytecodeHash) -> Self {
Self { use_literal_content: None, bytecode_hash: Some(hash) }
}
}
/// Determines the hash method for the metadata hash that is appended to the bytecode.
///
/// Solc's default is `Ipfs`, see <https://docs.soliditylang.org/en/latest/using-the-compiler.html#compiler-api>.
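///
/// Round-trip sketch for the string form used in the standard JSON settings
/// (illustrative):
///
/// ```
/// use ethers_solc::artifacts::BytecodeHash;
/// let hash: BytecodeHash = "bzzr1".parse().unwrap();
/// assert_eq!(hash, BytecodeHash::Bzzr1);
/// assert_eq!(hash.to_string(), "bzzr1");
/// ```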
#[derive(Clone, Debug, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum BytecodeHash {
Ipfs,
None,
Bzzr1,
}
impl Default for BytecodeHash {
fn default() -> Self {
BytecodeHash::Ipfs
}
}
impl FromStr for BytecodeHash {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"none" => Ok(BytecodeHash::None),
"ipfs" => Ok(BytecodeHash::Ipfs),
"bzzr1" => Ok(BytecodeHash::Bzzr1),
s => Err(format!("Unknown bytecode hash: {}", s)),
}
}
}
impl fmt::Display for BytecodeHash {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let s = match self {
BytecodeHash::Ipfs => "ipfs",
BytecodeHash::None => "none",
BytecodeHash::Bzzr1 => "bzzr1",
};
f.write_str(s)
}
}
/// Bindings for [`solc` contract metadata](https://docs.soliditylang.org/en/latest/metadata.html)
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Metadata {
pub compiler: Compiler,
pub language: String,
pub output: Output,
pub settings: MetadataSettings,
pub sources: MetadataSources,
pub version: i64,
}
/// Compiler settings
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct MetadataSettings {
/// Required for Solidity: File and name of the contract or library this metadata is created
/// for.
#[serde(default, rename = "compilationTarget")]
pub compilation_target: BTreeMap<String, String>,
#[serde(flatten)]
pub inner: Settings,
}
/// Compilation source files/source units, keys are file names
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct MetadataSources {
#[serde(flatten)]
pub inner: BTreeMap<String, MetadataSource>,
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct MetadataSource {
/// Required: keccak256 hash of the source file
pub keccak256: String,
/// Required (unless "content" is used, see below): Sorted URL(s)
/// to the source file, protocol is more or less arbitrary, but a
/// Swarm URL is recommended
#[serde(default)]
pub urls: Vec<String>,
/// Required (unless "url" is used): literal contents of the source file
#[serde(default, skip_serializing_if = "Option::is_none")]
pub content: Option<String>,
/// Optional: SPDX license identifier as given in the source file
pub license: Option<String>,
}
/// Model checker settings for solc
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct ModelCheckerSettings {
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
pub contracts: BTreeMap<String, Vec<String>>,
#[serde(
default,
with = "serde_helpers::display_from_str_opt",
skip_serializing_if = "Option::is_none"
)]
pub engine: Option<ModelCheckerEngine>,
#[serde(skip_serializing_if = "Option::is_none")]
pub timeout: Option<u32>,
#[serde(skip_serializing_if = "Option::is_none")]
pub targets: Option<Vec<ModelCheckerTarget>>,
}
/// Which model checker engine to run.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum ModelCheckerEngine {
Default,
All,
BMC,
CHC,
}
impl fmt::Display for ModelCheckerEngine {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let string = match self {
ModelCheckerEngine::Default => "none",
ModelCheckerEngine::All => "all",
ModelCheckerEngine::BMC => "bmc",
ModelCheckerEngine::CHC => "chc",
};
write!(f, "{}", string)
}
}
impl FromStr for ModelCheckerEngine {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"none" => Ok(ModelCheckerEngine::Default),
"all" => Ok(ModelCheckerEngine::All),
"bmc" => Ok(ModelCheckerEngine::BMC),
"chc" => Ok(ModelCheckerEngine::CHC),
s => Err(format!("Unknown model checker engine: {}", s)),
}
}
}
impl Default for ModelCheckerEngine {
fn default() -> Self {
ModelCheckerEngine::Default
}
}
/// Which model checker targets to check.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum ModelCheckerTarget {
Assert,
Underflow,
Overflow,
DivByZero,
ConstantCondition,
PopEmptyArray,
OutOfBounds,
Balance,
}
impl fmt::Display for ModelCheckerTarget {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let string = match self {
ModelCheckerTarget::Assert => "assert",
ModelCheckerTarget::Underflow => "underflow",
ModelCheckerTarget::Overflow => "overflow",
ModelCheckerTarget::DivByZero => "divByZero",
ModelCheckerTarget::ConstantCondition => "constantCondition",
ModelCheckerTarget::PopEmptyArray => "popEmptyArray",
ModelCheckerTarget::OutOfBounds => "outOfBounds",
ModelCheckerTarget::Balance => "balance",
};
write!(f, "{}", string)
}
}
impl FromStr for ModelCheckerTarget {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"assert" => Ok(ModelCheckerTarget::Assert),
"underflow" => Ok(ModelCheckerTarget::Underflow),
"overflow" => Ok(ModelCheckerTarget::Overflow),
"divByZero" => Ok(ModelCheckerTarget::DivByZero),
"constantCondition" => Ok(ModelCheckerTarget::ConstantCondition),
"popEmptyArray" => Ok(ModelCheckerTarget::PopEmptyArray),
"outOfBounds" => Ok(ModelCheckerTarget::OutOfBounds),
"balance" => Ok(ModelCheckerTarget::Balance),
s => Err(format!("Unknown model checker target: {}", s)),
}
}
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Compiler {
pub version: String,
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Output {
pub abi: Vec<SolcAbi>,
pub devdoc: Option<Doc>,
pub userdoc: Option<Doc>,
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct SolcAbi {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub inputs: Vec<Item>,
#[serde(rename = "stateMutability")]
pub state_mutability: Option<String>,
#[serde(rename = "type")]
pub abi_type: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub outputs: Vec<Item>,
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Item {
#[serde(rename = "internalType")]
pub internal_type: Option<String>,
pub name: String,
#[serde(rename = "type")]
pub put_type: String,
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Doc {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub kind: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub methods: Option<DocLibraries>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<u32>,
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct DocLibraries {
#[serde(flatten)]
pub libs: BTreeMap<String, serde_json::Value>,
}
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct Source {
pub content: String,
}
impl Source {
    /// This is a heuristically measured threshold at which we can generally expect a speedup by
    /// using rayon's `par_iter`; see `Self::read_all_files`
pub const NUM_READ_PAR: usize = 8;
/// Reads the file content
pub fn read(file: impl AsRef<Path>) -> Result<Self, SolcIoError> {
let file = file.as_ref();
Ok(Self { content: fs::read_to_string(file).map_err(|err| SolcIoError::new(err, file))? })
}
/// Recursively finds all source files under the given dir path and reads them all
pub fn read_all_from(dir: impl AsRef<Path>) -> Result<Sources, SolcIoError> {
Self::read_all_files(utils::source_files(dir))
}
/// Reads all source files of the given vec
///
/// Depending on the len of the vec it will try to read the files in parallel
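    ///
    /// Call-shape sketch (the paths are hypothetical, hence `no_run`):
    ///
    /// ```no_run
    /// use std::path::PathBuf;
    /// use ethers_solc::artifacts::Source;
    /// let files = vec![PathBuf::from("contracts/A.sol"), PathBuf::from("contracts/B.sol")];
    /// let sources = Source::read_all_files(files).unwrap();
    /// assert_eq!(sources.len(), 2);
    /// ```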
pub fn read_all_files(files: Vec<PathBuf>) -> Result<Sources, SolcIoError> {
use rayon::prelude::*;
if files.len() < Self::NUM_READ_PAR {
Self::read_all(files)
} else {
files
.par_iter()
.map(Into::into)
.map(|file| Self::read(&file).map(|source| (file, source)))
.collect()
}
}
/// Reads all files
pub fn read_all<T, I>(files: I) -> Result<Sources, SolcIoError>
where
I: IntoIterator<Item = T>,
T: Into<PathBuf>,
{
files
.into_iter()
.map(Into::into)
.map(|file| Self::read(&file).map(|source| (file, source)))
.collect()
}
/// Parallelized version of `Self::read_all` that reads all files using a parallel iterator
///
/// NOTE: this is only expected to be faster than `Self::read_all` if the given iterator
/// contains at least several paths. see also `Self::read_all_files`.
pub fn par_read_all<T, I>(files: I) -> Result<Sources, SolcIoError>
where
I: IntoIterator<Item = T>,
<I as IntoIterator>::IntoIter: Send,
T: Into<PathBuf> + Send,
{
use rayon::{iter::ParallelBridge, prelude::ParallelIterator};
files
.into_iter()
.par_bridge()
.map(Into::into)
.map(|file| Self::read(&file).map(|source| (file, source)))
.collect()
}
/// Generate a non-cryptographically secure checksum of the file's content
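    ///
    /// Sketch: equal content yields equal hashes (illustrative):
    ///
    /// ```
    /// use ethers_solc::artifacts::Source;
    /// let a = Source { content: "contract A {}".to_string() };
    /// let b = Source { content: "contract A {}".to_string() };
    /// assert_eq!(a.content_hash(), b.content_hash());
    /// ```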
pub fn content_hash(&self) -> String {
let mut hasher = md5::Md5::new();
hasher.update(&self.content);
let result = hasher.finalize();
hex::encode(result)
}
/// Returns all import statements of the file
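    ///
    /// Call-shape sketch (the exact match format is determined by
    /// `utils::find_import_paths`, so no output is asserted here):
    ///
    /// ```no_run
    /// use ethers_solc::artifacts::Source;
    /// let source = Source { content: "import \"./Other.sol\";".to_string() };
    /// let imports = source.parse_imports();
    /// ```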
pub fn parse_imports(&self) -> Vec<&str> {
utils::find_import_paths(self.as_ref()).map(|m| m.as_str()).collect()
}
}
#[cfg(feature = "async")]
impl Source {
/// async version of `Self::read`
pub async fn async_read(file: impl AsRef<Path>) -> Result<Self, SolcIoError> {
let file = file.as_ref();
Ok(Self {
content: tokio::fs::read_to_string(file)
.await
.map_err(|err| SolcIoError::new(err, file))?,
})
}
/// Finds all source files under the given dir path and reads them all
pub async fn async_read_all_from(dir: impl AsRef<Path>) -> Result<Sources, SolcIoError> {
Self::async_read_all(utils::source_files(dir.as_ref())).await
}
/// async version of `Self::read_all`
pub async fn async_read_all<T, I>(files: I) -> Result<Sources, SolcIoError>
where
I: IntoIterator<Item = T>,
T: Into<PathBuf>,
{
futures_util::future::join_all(
files
.into_iter()
.map(Into::into)
.map(|file| async { Self::async_read(&file).await.map(|source| (file, source)) }),
)
.await
.into_iter()
.collect()
}
}
impl AsRef<str> for Source {
fn as_ref(&self) -> &str {
&self.content
}
}
/// Output type `solc` produces
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Default)]
pub struct CompilerOutput {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub errors: Vec<Error>,
#[serde(default)]
pub sources: BTreeMap<String, SourceFile>,
#[serde(default)]
pub contracts: Contracts,
}
impl CompilerOutput {
/// Whether the output contains a compiler error
pub fn has_error(&self) -> bool {
self.errors.iter().any(|err| err.severity.is_error())
}
/// Whether the output contains a compiler warning
pub fn has_warning(&self, ignored_error_codes: &[u64]) -> bool {
self.errors.iter().any(|err| {
if err.severity.is_warning() {
err.error_code.as_ref().map_or(false, |code| !ignored_error_codes.contains(code))
} else {
false
}
})
}
/// Finds the _first_ contract with the given name
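    ///
    /// Sketch on an empty output (a `Default` output contains no contracts):
    ///
    /// ```
    /// use ethers_solc::artifacts::CompilerOutput;
    /// let output = CompilerOutput::default();
    /// assert!(output.find("Greeter").is_none());
    /// ```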
pub fn find(&self, contract: impl AsRef<str>) -> Option<CompactContractRef> {
let contract_name = contract.as_ref();
self.contracts_iter().find_map(|(name, contract)| {
(name == contract_name).then(|| CompactContractRef::from(contract))
})
}
/// Finds the first contract with the given name and removes it from the set
pub fn remove(&mut self, contract: impl AsRef<str>) -> Option<Contract> {
let contract_name = contract.as_ref();
self.contracts.values_mut().find_map(|c| c.remove(contract_name))
}
/// Iterate over all contracts and their names
pub fn contracts_iter(&self) -> impl Iterator<Item = (&String, &Contract)> {
self.contracts.values().flatten()
}
/// Iterate over all contracts and their names
pub fn contracts_into_iter(self) -> impl Iterator<Item = (String, Contract)> {
self.contracts.into_values().flatten()
}
/// Given the contract file's path and the contract's name, tries to return the contract's
/// bytecode, runtime bytecode, and abi
pub fn get(&self, path: &str, contract: &str) -> Option<CompactContractRef> {
self.contracts
.get(path)
.and_then(|contracts| contracts.get(contract))
.map(CompactContractRef::from)
}
/// Returns the output's source files and contracts separately, wrapped in helper types that
/// provide several helper methods
pub fn split(self) -> (SourceFiles, OutputContracts) {
(SourceFiles(self.sources), OutputContracts(self.contracts))
}
/// Retains only those files the given iterator yields
///
/// In other words, removes all contracts for files not included in the iterator
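    ///
    /// Call-shape sketch (a no-op on an empty output; the path is hypothetical):
    ///
    /// ```
    /// use ethers_solc::artifacts::CompilerOutput;
    /// let mut output = CompilerOutput::default();
    /// output.retain_files(vec!["src/Contract.sol"]);
    /// assert!(output.contracts.is_empty());
    /// ```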
pub fn retain_files<'a, I>(&mut self, files: I)
where
I: IntoIterator<Item = &'a str>,
{
        // Note: use `to_lowercase` here because solc does not necessarily emit the exact file
        // name, e.g. `src/utils/upgradeProxy.sol` is emitted as `src/utils/UpgradeProxy.sol`
let files: HashSet<_> = files.into_iter().map(|s| s.to_lowercase()).collect();
self.contracts.retain(|f, _| files.contains(f.to_lowercase().as_str()));
self.sources.retain(|f, _| files.contains(f.to_lowercase().as_str()));
self.errors.retain(|err| {
err.source_location
.as_ref()
.map(|s| files.contains(s.file.to_lowercase().as_str()))
.unwrap_or(true)
});
}
pub fn merge(&mut self, other: CompilerOutput) {
self.errors.extend(other.errors);
self.contracts.extend(other.contracts);
self.sources.extend(other.sources);
}
}
/// A wrapper helper type for the `Contracts` type alias
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct OutputContracts(pub Contracts);
impl OutputContracts {
/// Returns an iterator over all contracts and their source names.
pub fn into_contracts(self) -> impl Iterator<Item = (String, Contract)> {
self.0.into_values().flatten()
}
/// Iterate over all contracts and their names
pub fn contracts_iter(&self) -> impl Iterator<Item = (&String, &Contract)> {
self.0.values().flatten()
}
/// Finds the _first_ contract with the given name
pub fn find(&self, contract: impl AsRef<str>) -> Option<CompactContractRef> {
let contract_name = contract.as_ref();
self.contracts_iter().find_map(|(name, contract)| {
(name == contract_name).then(|| CompactContractRef::from(contract))
})
}
/// Finds the first contract with the given name and removes it from the set
pub fn remove(&mut self, contract: impl AsRef<str>) -> Option<Contract> {
let contract_name = contract.as_ref();
self.0.values_mut().find_map(|c| c.remove(contract_name))
}
}
/// A helper type that ensures lossless (de)serialisation, unlike [`ethers_core::abi::Abi`], which
/// omits some information of (nested) components in a serde roundtrip. This is a problem for
/// abiencoderv2 structs, because [`ethers_core::abi::Contract`] represents those as
/// [`ethers_core::abi::Param`], and the `kind` field of type [`ethers_core::abi::ParamType`] does
/// not support deeply nested components, as is the case for structs. This is not easily fixable in
/// ethabi as it would require a redesign of the overall `Param` and `ParamType` types. Instead,
/// this type keeps a copy of the [`serde_json::Value`] when deserialized from the `solc` json
/// compiler output and uses it to serialize the `abi` without loss.
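///
/// Round-trip sketch with a trivially empty ABI (illustrative):
///
/// ```
/// use ethers_solc::artifacts::LosslessAbi;
/// let json = serde_json::json!([]);
/// let abi: LosslessAbi = serde_json::from_value(json.clone()).unwrap();
/// assert_eq!(serde_json::to_value(&abi).unwrap(), json);
/// ```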
#[derive(Clone, Debug, PartialEq, Default)]
pub struct LosslessAbi {
/// The complete abi as json value
pub abi_value: serde_json::Value,
/// The deserialised version of `abi_value`
pub abi: Abi,
}
impl From<LosslessAbi> for Abi {
fn from(abi: LosslessAbi) -> Self {
abi.abi
}
}
impl Serialize for LosslessAbi {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.abi_value.serialize(serializer)
}
}
impl<'de> Deserialize<'de> for LosslessAbi {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let abi_value = serde_json::Value::deserialize(deserializer)?;
let abi = serde_json::from_value(abi_value.clone()).map_err(serde::de::Error::custom)?;
Ok(Self { abi_value, abi })
}
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq)]
pub struct UserDoc {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<u32>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub kind: Option<String>,
#[serde(default, skip_serializing_if = "::std::collections::BTreeMap::is_empty")]
pub methods: BTreeMap<String, UserDocNotice>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub notice: Option<String>,
}
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
#[serde(untagged)]
pub enum UserDocNotice {
    // NOTE: this is a variant used for constructors on older solc versions
Constructor(String),
Method { notice: String },
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq)]
pub struct DevDoc {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<u32>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub kind: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub author: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub details: Option<String>,
#[serde(default, rename = "custom:experimental", skip_serializing_if = "Option::is_none")]
pub custom_experimental: Option<String>,
#[serde(default, skip_serializing_if = "::std::collections::BTreeMap::is_empty")]
pub methods: BTreeMap<String, MethodDoc>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub title: Option<String>,
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq)]
pub struct MethodDoc {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub details: Option<String>,
#[serde(default, skip_serializing_if = "::std::collections::BTreeMap::is_empty")]
pub params: BTreeMap<String, String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub r#return: Option<String>,
}
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct Evm {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub assembly: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub legacy_assembly: Option<serde_json::Value>,
pub bytecode: Option<Bytecode>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub deployed_bytecode: Option<DeployedBytecode>,
/// The list of function hashes
#[serde(default, skip_serializing_if = "::std::collections::BTreeMap::is_empty")]
pub method_identifiers: BTreeMap<String, String>,
/// Function gas estimates
#[serde(default, skip_serializing_if = "Option::is_none")]
pub gas_estimates: Option<GasEstimates>,
}
impl Evm {
    /// Crate-internal helper to transform the underlying bytecode artifacts into a more
    /// convenient structure
pub(crate) fn into_compact(self) -> CompactEvm {
let Evm {
assembly,
legacy_assembly,
bytecode,
deployed_bytecode,
method_identifiers,
gas_estimates,
} = self;
let (bytecode, deployed_bytecode) = match (bytecode, deployed_bytecode) {
(Some(bcode), Some(dbcode)) => (Some(bcode.into()), Some(dbcode.into())),
(None, Some(dbcode)) => (None, Some(dbcode.into())),
(Some(bcode), None) => (Some(bcode.into()), None),
(None, None) => (None, None),
};
CompactEvm {
assembly,
legacy_assembly,
bytecode,
deployed_bytecode,
method_identifiers,
gas_estimates,
}
}
}
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub(crate) struct CompactEvm {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub assembly: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub legacy_assembly: Option<serde_json::Value>,
pub bytecode: Option<CompactBytecode>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub deployed_bytecode: Option<CompactDeployedBytecode>,
/// The list of function hashes
#[serde(default, skip_serializing_if = "::std::collections::BTreeMap::is_empty")]
pub method_identifiers: BTreeMap<String, String>,
/// Function gas estimates
#[serde(default, skip_serializing_if = "Option::is_none")]
pub gas_estimates: Option<GasEstimates>,
}
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct FunctionDebugData {
pub entry_point: Option<u32>,
pub id: Option<u32>,
pub parameter_slots: Option<u32>,
pub return_slots: Option<u32>,
}
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct GeneratedSource {
pub ast: serde_json::Value,
pub contents: String,
pub id: u32,
pub language: String,
pub name: String,
}
/// Byte offsets into the bytecode.
/// Linking replaces the 20 bytes located there.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct Offsets {
pub start: u32,
pub length: u32,
}
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct GasEstimates {
pub creation: Creation,
#[serde(default)]
pub external: BTreeMap<String, String>,
#[serde(default)]
pub internal: BTreeMap<String, String>,
}
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct Creation {
pub code_deposit_cost: String,
pub execution_cost: String,
pub total_cost: String,
}
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct Ewasm {
pub wast: String,
pub wasm: String,
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq)]
pub struct StorageLayout {
pub storage: Vec<Storage>,
#[serde(default, deserialize_with = "serde_helpers::default_for_null")]
pub types: BTreeMap<String, StorageType>,
}
impl StorageLayout {
fn is_empty(&self) -> bool {
self.storage.is_empty() && self.types.is_empty()
}
}
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct Storage {
#[serde(rename = "astId")]
pub ast_id: u64,
pub contract: String,
pub label: String,
pub offset: i64,
pub slot: String,
#[serde(rename = "type")]
pub storage_type: String,
}
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct StorageType {
pub encoding: String,
pub label: String,
#[serde(rename = "numberOfBytes")]
pub number_of_bytes: String,
}
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, Hash)]
#[serde(rename_all = "camelCase")]
pub struct Error {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub source_location: Option<SourceLocation>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub secondary_source_locations: Vec<SecondarySourceLocation>,
pub r#type: String,
pub component: String,
pub severity: Severity,
#[serde(default, with = "serde_helpers::display_from_str_opt")]
pub error_code: Option<u64>,
pub message: String,
pub formatted_message: Option<String>,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(msg) = &self.formatted_message {
match self.severity {
Severity::Error => {
if let Some(code) = self.error_code {
format!("error[{}]: ", code).as_str().red().fmt(f)?;
}
msg.as_str().red().fmt(f)
}
Severity::Warning | Severity::Info => {
if let Some(code) = self.error_code {
format!("warning[{}]: ", code).as_str().yellow().fmt(f)?;
}
msg.as_str().yellow().fmt(f)
}
}
} else {
self.severity.fmt(f)?;
writeln!(f, ": {}", self.message)
}
}
}
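/// Severity of a `solc` diagnostic.
///
/// Parsing sketch, mirroring the lowercase serde representation (illustrative):
///
/// ```
/// use ethers_solc::artifacts::Severity;
/// let severity: Severity = "warning".parse().unwrap();
/// assert!(severity.is_warning());
/// ```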
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub enum Severity {
Error,
Warning,
Info,
}
impl fmt::Display for Severity {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Severity::Error => f.write_str(&"Error".red()),
Severity::Warning => f.write_str(&"Warning".yellow()),
Severity::Info => f.write_str("Info"),
}
}
}
impl Severity {
pub fn is_error(&self) -> bool {
matches!(self, Severity::Error)
}
pub fn is_warning(&self) -> bool {
matches!(self, Severity::Warning)
}
pub fn is_info(&self) -> bool {
matches!(self, Severity::Info)
}
}
impl FromStr for Severity {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"error" => Ok(Severity::Error),
"warning" => Ok(Severity::Warning),
"info" => Ok(Severity::Info),
s => Err(format!("Invalid severity: {}", s)),
}
}
}
impl Serialize for Severity {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match self {
Severity::Error => serializer.serialize_str("error"),
Severity::Warning => serializer.serialize_str("warning"),
Severity::Info => serializer.serialize_str("info"),
}
}
}
impl<'de> Deserialize<'de> for Severity {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct SeverityVisitor;
impl<'de> Visitor<'de> for SeverityVisitor {
type Value = Severity;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(formatter, "severity string")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
value.parse().map_err(serde::de::Error::custom)
}
}
deserializer.deserialize_str(SeverityVisitor)
}
}
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, Hash)]
pub struct SourceLocation {
pub file: String,
pub start: i32,
pub end: i32,
}
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, Hash)]
pub struct SecondarySourceLocation {
pub file: Option<String>,
pub start: Option<i32>,
pub end: Option<i32>,
pub message: Option<String>,
}
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct SourceFile {
pub id: u32,
#[serde(default, with = "serde_helpers::empty_json_object_opt")]
pub ast: Option<Ast>,
}
// === impl SourceFile ===
impl SourceFile {
/// Returns `true` if the source file contains at least 1 `ContractDefinition` such as
/// `contract`, `abstract contract`, `interface` or `library`
pub fn contains_contract_definition(&self) -> bool {
if let Some(ref ast) = self.ast {
// contract definitions are only allowed at the source-unit level <https://docs.soliditylang.org/en/latest/grammar.html>
return ast.nodes.iter().any(|node| node.node_type == NodeType::ContractDefinition)
// abstract contract, interfaces: ContractDefinition
}
false
}
}
/// A wrapper type for a list of source files
/// `path -> SourceFile`
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct SourceFiles(pub BTreeMap<String, SourceFile>);
impl SourceFiles {
/// Returns an iterator over the source files' ids and path
///
/// ```
/// use std::collections::BTreeMap;
/// use ethers_solc::artifacts::SourceFiles;
/// # fn demo(files: SourceFiles) {
/// let sources: BTreeMap<u32,String> = files.into_ids().collect();
/// # }
/// ```
pub fn into_ids(self) -> impl Iterator<Item = (u32, String)> {
self.0.into_iter().map(|(k, v)| (v.id, k))
}
/// Returns an iterator over the source files' paths and ids
///
/// ```
/// use std::collections::BTreeMap;
/// use ethers_solc::artifacts::SourceFiles;
/// # fn demo(files: SourceFiles) {
/// let sources :BTreeMap<String, u32> = files.into_paths().collect();
/// # }
/// ```
pub fn into_paths(self) -> impl Iterator<Item = (String, u32)> {
self.0.into_iter().map(|(k, v)| (k, v.id))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::AggregatedCompilerOutput;
use ethers_core::types::Address;
use std::{fs, path::PathBuf};
#[test]
fn can_parse_declaration_error() {
let s = r#"{
"errors": [
{
"component": "general",
"errorCode": "7576",
"formattedMessage": "DeclarationError: Undeclared identifier. Did you mean \"revert\"?\n --> /Users/src/utils/UpgradeProxy.sol:35:17:\n |\n35 | refert(\"Transparent ERC1967 proxies do not have upgradeable implementations\");\n | ^^^^^^\n\n",
"message": "Undeclared identifier. Did you mean \"revert\"?",
"severity": "error",
"sourceLocation": {
"end": 1623,
"file": "/Users/src/utils/UpgradeProxy.sol",
"start": 1617
},
"type": "DeclarationError"
}
],
"sources": { }
}"#;
let out: CompilerOutput = serde_json::from_str(s).unwrap();
assert_eq!(out.errors.len(), 1);
let mut aggregated = AggregatedCompilerOutput::default();
aggregated.extend("0.8.12".parse().unwrap(), out);
assert!(!aggregated.is_unchanged());
}
#[test]
fn can_link_bytecode() {
// test cases taken from <https://github.com/ethereum/solc-js/blob/master/test/linker.js>
#[derive(Serialize, Deserialize)]
struct Mockject {
object: BytecodeObject,
}
fn parse_bytecode(bytecode: &str) -> BytecodeObject {
let object: Mockject =
serde_json::from_value(serde_json::json!({ "object": bytecode })).unwrap();
object.object
}
let bytecode = "6060604052341561000f57600080fd5b60f48061001d6000396000f300606060405260043610603e5763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166326121ff081146043575b600080fd5b3415604d57600080fd5b60536055565b005b73__lib2.sol:L____________________________6326121ff06040518163ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040160006040518083038186803b151560b357600080fd5b6102c65a03f4151560c357600080fd5b5050505600a165627a7a723058207979b30bd4a07c77b02774a511f2a1dd04d7e5d65b5c2735b5fc96ad61d43ae40029";
let mut object = parse_bytecode(bytecode);
assert!(object.is_unlinked());
assert!(object.contains_placeholder("lib2.sol", "L"));
assert!(object.contains_fully_qualified_placeholder("lib2.sol:L"));
assert!(object.link("lib2.sol", "L", Address::random()).resolve().is_some());
assert!(!object.is_unlinked());
let mut code = Bytecode {
function_debug_data: Default::default(),
object: parse_bytecode(bytecode),
opcodes: None,
source_map: None,
generated_sources: vec![],
link_references: BTreeMap::from([(
"lib2.sol".to_string(),
BTreeMap::from([("L".to_string(), vec![])]),
)]),
};
assert!(!code.link("lib2.sol", "Y", Address::random()));
assert!(code.link("lib2.sol", "L", Address::random()));
assert!(code.link("lib2.sol", "L", Address::random()));
let hashed_placeholder = "6060604052341561000f57600080fd5b60f48061001d6000396000f300606060405260043610603e5763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166326121ff081146043575b600080fd5b3415604d57600080fd5b60536055565b005b73__$cb901161e812ceb78cfe30ca65050c4337$__6326121ff06040518163ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040160006040518083038186803b151560b357600080fd5b6102c65a03f4151560c357600080fd5b5050505600a165627a7a723058207979b30bd4a07c77b02774a511f2a1dd04d7e5d65b5c2735b5fc96ad61d43ae40029";
let mut object = parse_bytecode(hashed_placeholder);
assert!(object.is_unlinked());
assert!(object.contains_placeholder("lib2.sol", "L"));
assert!(object.contains_fully_qualified_placeholder("lib2.sol:L"));
assert!(object.link("lib2.sol", "L", Address::default()).resolve().is_some());
assert!(!object.is_unlinked());
}
#[test]
fn can_parse_compiler_output() {
let mut dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
dir.push("test-data/out");
for path in fs::read_dir(dir).unwrap() {
let path = path.unwrap().path();
let compiler_output = fs::read_to_string(&path).unwrap();
serde_json::from_str::<CompilerOutput>(&compiler_output).unwrap_or_else(|err| {
panic!("Failed to read compiler output of {} {}", path.display(), err)
});
}
}
#[test]
fn can_parse_compiler_input() {
let mut dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
dir.push("test-data/in");
for path in fs::read_dir(dir).unwrap() {
let path = path.unwrap().path();
let compiler_input = fs::read_to_string(&path).unwrap();
serde_json::from_str::<CompilerInput>(&compiler_input).unwrap_or_else(|err| {
panic!("Failed to read compiler input of {} {}", path.display(), err)
});
}
}
#[test]
fn can_parse_standard_json_compiler_input() {
let mut dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
dir.push("test-data/in");
for path in fs::read_dir(dir).unwrap() {
let path = path.unwrap().path();
let compiler_input = fs::read_to_string(&path).unwrap();
let val = serde_json::from_str::<StandardJsonCompilerInput>(&compiler_input)
.unwrap_or_else(|err| {
panic!("Failed to read compiler output of {} {}", path.display(), err)
});
let pretty = serde_json::to_string_pretty(&val).unwrap();
serde_json::from_str::<CompilerInput>(&pretty).unwrap_or_else(|err| {
panic!("Failed to read converted compiler input of {} {}", path.display(), err)
});
}
}
#[test]
fn test_evm_version_normalization() {
for (solc_version, evm_version, expected) in &[
            // Ensure that versions before 0.4.21 always return None
("0.4.20", EvmVersion::Homestead, None),
// Constantinople clipping
("0.4.21", EvmVersion::Homestead, Some(EvmVersion::Homestead)),
("0.4.21", EvmVersion::Constantinople, Some(EvmVersion::Constantinople)),
("0.4.21", EvmVersion::London, Some(EvmVersion::Constantinople)),
// Petersburg
("0.5.5", EvmVersion::Homestead, Some(EvmVersion::Homestead)),
("0.5.5", EvmVersion::Petersburg, Some(EvmVersion::Petersburg)),
("0.5.5", EvmVersion::London, Some(EvmVersion::Petersburg)),
// Istanbul
("0.5.14", EvmVersion::Homestead, Some(EvmVersion::Homestead)),
("0.5.14", EvmVersion::Istanbul, Some(EvmVersion::Istanbul)),
("0.5.14", EvmVersion::London, Some(EvmVersion::Istanbul)),
// Berlin
("0.8.5", EvmVersion::Homestead, Some(EvmVersion::Homestead)),
("0.8.5", EvmVersion::Berlin, Some(EvmVersion::Berlin)),
("0.8.5", EvmVersion::London, Some(EvmVersion::Berlin)),
// London
("0.8.7", EvmVersion::Homestead, Some(EvmVersion::Homestead)),
("0.8.7", EvmVersion::London, Some(EvmVersion::London)),
("0.8.7", EvmVersion::London, Some(EvmVersion::London)),
] {
assert_eq!(
&evm_version.normalize_version(&Version::from_str(solc_version).unwrap()),
expected
)
}
}
#[test]
fn can_sanitize_byte_code_hash() {
let version: Version = "0.6.0".parse().unwrap();
let settings = Settings { metadata: Some(BytecodeHash::Ipfs.into()), ..Default::default() };
let input = CompilerInput {
language: "Solidity".to_string(),
sources: Default::default(),
settings,
};
let i = input.clone().sanitized(&version);
assert_eq!(i.settings.metadata.unwrap().bytecode_hash, Some(BytecodeHash::Ipfs));
let version: Version = "0.5.17".parse().unwrap();
let i = input.sanitized(&version);
assert!(i.settings.metadata.unwrap().bytecode_hash.is_none());
}
#[test]
fn can_parse_libraries() {
let libraries = ["./src/lib/LibraryContract.sol:Library:0xaddress".to_string()];
let libs = Libraries::parse(&libraries[..]).unwrap().libs;
assert_eq!(
libs,
BTreeMap::from([(
PathBuf::from("./src/lib/LibraryContract.sol"),
BTreeMap::from([("Library".to_string(), "0xaddress".to_string())])
)])
);
}
#[test]
fn can_parse_many_libraries() {
        let libraries = [
"./src/SizeAuctionDiscount.sol:Chainlink:0xffedba5e171c4f15abaaabc86e8bd01f9b54dae5".to_string(),
"./src/SizeAuction.sol:ChainlinkTWAP:0xffedba5e171c4f15abaaabc86e8bd01f9b54dae5".to_string(),
"./src/SizeAuction.sol:Math:0x902f6cf364b8d9470d5793a9b2b2e86bddd21e0c".to_string(),
"./src/test/ChainlinkTWAP.t.sol:ChainlinkTWAP:0xffedba5e171c4f15abaaabc86e8bd01f9b54dae5".to_string(),
"./src/SizeAuctionDiscount.sol:Math:0x902f6cf364b8d9470d5793a9b2b2e86bddd21e0c".to_string(),
];
let libs = Libraries::parse(&libraries[..]).unwrap().libs;
pretty_assertions::assert_eq!(
libs,
BTreeMap::from([
(
PathBuf::from("./src/SizeAuctionDiscount.sol"),
BTreeMap::from([
(
"Chainlink".to_string(),
"0xffedba5e171c4f15abaaabc86e8bd01f9b54dae5".to_string()
),
(
"Math".to_string(),
"0x902f6cf364b8d9470d5793a9b2b2e86bddd21e0c".to_string()
)
])
),
(
PathBuf::from("./src/SizeAuction.sol"),
BTreeMap::from([
(
"ChainlinkTWAP".to_string(),
"0xffedba5e171c4f15abaaabc86e8bd01f9b54dae5".to_string()
),
(
"Math".to_string(),
"0x902f6cf364b8d9470d5793a9b2b2e86bddd21e0c".to_string()
)
])
),
(
PathBuf::from("./src/test/ChainlinkTWAP.t.sol"),
BTreeMap::from([(
"ChainlinkTWAP".to_string(),
"0xffedba5e171c4f15abaaabc86e8bd01f9b54dae5".to_string()
)])
),
])
);
}
}
| 35.915681 | 582 | 0.618859 |
2283015b7de98c1cb5fd540cc9d8460714516700 | 371 | use napi::*;
#[js_function(1)]
pub fn detach_arraybuffer(ctx: CallContext) -> Result<JsUndefined> {
let input = ctx.get::<JsArrayBuffer>(0)?;
input.detach()?;
ctx.env.get_undefined()
}
#[js_function(1)]
pub fn is_detach_arraybuffer(ctx: CallContext) -> Result<JsBoolean> {
let input = ctx.get::<JsArrayBuffer>(0)?;
ctx.env.get_boolean(input.is_detached()?)
}
| 24.733333 | 69 | 0.698113 |
cc6d75674c97c13caad105294b0645ba0c0b778e | 1,190 | // Copyright 2018 The Frown Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use frown_chain as chain;
use frown_core as core;
use frown_p2p as p2p;
use frown_pool as pool;
use frown_util as util;
use failure;
#[macro_use]
extern crate failure_derive;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate log;
#[macro_use]
mod web;
pub mod auth;
pub mod client;
mod handlers;
mod rest;
mod router;
mod types;
pub use crate::auth::{BasicAuthMiddleware, FROWN_BASIC_REALM};
pub use crate::handlers::start_rest_apis;
pub use crate::rest::*;
pub use crate::router::*;
pub use crate::types::*;
pub use crate::web::*;
| 24.791667 | 75 | 0.748739 |
3336f2c4fc7405c82dd263bfa43f11cead03a973 | 1,179 | // Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::os::raw::c_void;
use core_foundation::base::{CFTypeID, TCFType};
use core_graphics::context::{CGContext, CGContextRef};
use foreign_types::{ForeignType, ForeignTypeRef};
#[repr(C)]
pub struct __CTFrame(c_void);
pub type CTFrameRef = *const __CTFrame;
declare_TCFType! {
CTFrame, CTFrameRef
}
impl_TCFType!(CTFrame, CTFrameRef, CTFrameGetTypeID);
impl_CFTypeDescription!(CTFrame);
impl CTFrame {
pub fn draw(&self, context: &CGContextRef) {
unsafe {
CTFrameDraw(self.as_concrete_TypeRef(), context.as_ptr());
}
}
}
#[link(name = "CoreText", kind = "framework")]
extern {
fn CTFrameGetTypeID() -> CFTypeID;
fn CTFrameDraw(frame: CTFrameRef, context: *mut <CGContext as ForeignType>::CType);
} | 31.026316 | 87 | 0.718405 |
e61a1ba41139c78d5dcea2a18f5ced43661561de | 1,208 | //https://leetcode-cn.com/problems/simplify-path/
// Runtime: 0 ms
// Memory Usage: 2 MB
pub fn simplify_path(path: String) -> String {
let mut stack = Vec::new();
let mut res = "".to_string();
for s in path.split_terminator('/') {
match s {
".." => {
stack.pop();
}
"" | "." => {
continue;
}
_ => {
stack.push(s);
}
}
}
for s in stack {
res += "/";
res += s;
}
if res.is_empty() {
res += "/";
}
res
}
// stack string
#[test]
fn test1_71() {
assert_eq!(simplify_path("/home/".to_string()), "/home".to_string());
assert_eq!(simplify_path("/../".to_string()), "/".to_string());
assert_eq!(
simplify_path("/home//foo/".to_string()),
"/home/foo".to_string()
);
assert_eq!(
simplify_path("/a/./b/../../c/".to_string()),
"/c".to_string()
);
assert_eq!(
simplify_path("/a/../../b/../c//.//".to_string()),
"/c".to_string()
);
assert_eq!(
simplify_path("/a//b////c/d//././/..".to_string()),
"/a/b/c".to_string()
);
}
| 23.686275 | 73 | 0.437914 |
ab1769f8cd00d6519ea6fb5ae5b6eb6ea890c06b | 1,221 | use argparse::{ArgumentParser, StoreConst};
#[cfg(feature="containers")]
use capsule::packages as capsule;
#[derive(Clone, Copy, Debug)]
pub enum CompressionType {
Gzip,
Bzip2,
Xz,
}
impl CompressionType {
pub fn get_short_option(&self) -> &str {
match *self {
CompressionType::Gzip => "-z",
CompressionType::Bzip2 => "-j",
CompressionType::Xz => "-J",
}
}
#[cfg(feature="containers")]
pub fn get_capsule_feature(&self) -> capsule::Feature {
match *self {
CompressionType::Gzip => capsule::Gzip,
CompressionType::Bzip2 => capsule::Bzip2,
CompressionType::Xz => capsule::Xz,
}
}
}
pub fn compression_type<'x>(ap: &mut ArgumentParser<'x>,
compression_type: &'x mut Option<CompressionType>)
{
ap.refer(compression_type)
.add_option(&["-z", "--gzip"], StoreConst(Some(CompressionType::Gzip)),
"Filter the image through gzip.")
.add_option(&["-j", "--bzip2"], StoreConst(Some(CompressionType::Bzip2)),
"Filter the image through bzip2.")
.add_option(&["-J", "--xz"], StoreConst(Some(CompressionType::Xz)),
"Filter the image through xz.");
}
| 27.75 | 77 | 0.59869 |
646539389e977e596bf4af969edc657348a67abd | 286 | pub mod buffer;
pub mod dispatch;
pub mod lsp;
pub mod plugin;
pub mod terminal;
pub mod watcher;
use dispatch::Dispatcher;
pub fn mainloop() {
let (sender, receiver) = lapce_rpc::stdio();
let dispatcher = Dispatcher::new(sender);
let _ = dispatcher.mainloop(receiver);
}
| 19.066667 | 48 | 0.702797 |
ed7dd377c835c529a52917c35f4b04e50c826cbb | 105,067 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! SQL Query Planner (produces logical plan from SQL AST)
use std::str::FromStr;
use std::sync::Arc;
use std::{convert::TryInto, vec};
use crate::catalog::TableReference;
use crate::datasource::TableProvider;
use crate::logical_plan::Expr::Alias;
use crate::logical_plan::{
and, lit, DFSchema, Expr, LogicalPlan, LogicalPlanBuilder, Operator, PlanType,
StringifiedPlan, ToDFSchema,
};
use crate::scalar::ScalarValue;
use crate::{
error::{DataFusionError, Result},
physical_plan::udaf::AggregateUDF,
};
use crate::{
physical_plan::udf::ScalarUDF,
physical_plan::{aggregates, functions},
sql::parser::{CreateExternalTable, FileType, Statement as DFStatement},
};
use arrow::datatypes::*;
use hashbrown::HashMap;
use crate::prelude::JoinType;
use sqlparser::ast::{
BinaryOperator, DataType as SQLDataType, DateTimeField, Expr as SQLExpr, FunctionArg,
Ident, Join, JoinConstraint, JoinOperator, ObjectName, Query, Select, SelectItem,
SetExpr, SetOperator, ShowStatementFilter, TableFactor, TableWithJoins,
UnaryOperator, Value,
};
use sqlparser::ast::{ColumnDef as SQLColumnDef, ColumnOption};
use sqlparser::ast::{OrderByExpr, Statement};
use sqlparser::parser::ParserError::ParserError;
use super::{
parser::DFParser,
utils::{
can_columns_satisfy_exprs, expand_wildcard, expr_as_column_expr, extract_aliases,
find_aggregate_exprs, find_column_exprs, rebase_expr, resolve_aliases_to_exprs,
},
};
/// The ContextProvider trait allows the query planner to obtain metadata about tables and
/// functions referenced in SQL statements
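///
/// A rough sketch of a provider backed by an in-memory table map. This is
/// illustrative only (marked `ignore`): the `MyContext` type and the
/// `TableReference::table` accessor are assumptions, not part of this module.
///
/// ```ignore
/// struct MyContext {
///     tables: HashMap<String, Arc<dyn TableProvider>>,
/// }
///
/// impl ContextProvider for MyContext {
///     fn get_table_provider(&self, name: TableReference) -> Option<Arc<dyn TableProvider>> {
///         self.tables.get(name.table()).cloned()
///     }
///     fn get_function_meta(&self, _name: &str) -> Option<Arc<ScalarUDF>> {
///         None
///     }
///     fn get_aggregate_meta(&self, _name: &str) -> Option<Arc<AggregateUDF>> {
///         None
///     }
/// }
/// ```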
pub trait ContextProvider {
/// Getter for a datasource
fn get_table_provider(&self, name: TableReference) -> Option<Arc<dyn TableProvider>>;
/// Getter for a UDF description
fn get_function_meta(&self, name: &str) -> Option<Arc<ScalarUDF>>;
/// Getter for a UDAF description
fn get_aggregate_meta(&self, name: &str) -> Option<Arc<AggregateUDF>>;
}
/// SQL query planner
pub struct SqlToRel<'a, S: ContextProvider> {
schema_provider: &'a S,
}
impl<'a, S: ContextProvider> SqlToRel<'a, S> {
/// Create a new query planner
pub fn new(schema_provider: &'a S) -> Self {
SqlToRel { schema_provider }
}
    /// Generate a logical plan from a DataFusion SQL statement
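    ///
    /// Typical flow (sketch; `context` stands for any `ContextProvider`
    /// implementation and is assumed here, hence `ignore`):
    ///
    /// ```ignore
    /// let statements = DFParser::parse_sql("SELECT 1")?;
    /// let planner = SqlToRel::new(&context);
    /// let plan = planner.statement_to_plan(&statements[0])?;
    /// ```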
pub fn statement_to_plan(&self, statement: &DFStatement) -> Result<LogicalPlan> {
match statement {
DFStatement::CreateExternalTable(s) => self.external_table_to_plan(&s),
DFStatement::Statement(s) => self.sql_statement_to_plan(&s),
}
}
/// Generate a logical plan from an SQL statement
pub fn sql_statement_to_plan(&self, sql: &Statement) -> Result<LogicalPlan> {
match sql {
Statement::Explain {
verbose,
statement,
analyze: _,
} => self.explain_statement_to_plan(*verbose, &statement),
Statement::Query(query) => self.query_to_plan(&query),
Statement::ShowVariable { variable } => self.show_variable_to_plan(&variable),
Statement::ShowColumns {
extended,
full,
table_name,
filter,
} => self.show_columns_to_plan(*extended, *full, table_name, filter.as_ref()),
_ => Err(DataFusionError::NotImplemented(
"Only SELECT statements are implemented".to_string(),
)),
}
}
    /// Generate a logical plan from an SQL query
pub fn query_to_plan(&self, query: &Query) -> Result<LogicalPlan> {
self.query_to_plan_with_alias(query, None, &mut HashMap::new())
}
    /// Generate a logical plan from an SQL query with optional alias
pub fn query_to_plan_with_alias(
&self,
query: &Query,
alias: Option<String>,
ctes: &mut HashMap<String, LogicalPlan>,
) -> Result<LogicalPlan> {
let set_expr = &query.body;
if let Some(with) = &query.with {
// Process CTEs from top to bottom
// do not allow self-references
for cte in &with.cte_tables {
// create logical plan & pass backreferencing CTEs
let logical_plan = self.query_to_plan_with_alias(
&cte.query,
Some(cte.alias.name.value.clone()),
&mut ctes.clone(),
)?;
ctes.insert(cte.alias.name.value.clone(), logical_plan);
}
}
let plan = self.set_expr_to_plan(set_expr, alias, ctes)?;
let plan = self.order_by(&plan, &query.order_by)?;
self.limit(&plan, &query.limit)
}
fn set_expr_to_plan(
&self,
set_expr: &SetExpr,
alias: Option<String>,
ctes: &mut HashMap<String, LogicalPlan>,
) -> Result<LogicalPlan> {
match set_expr {
SetExpr::Select(s) => self.select_to_plan(s.as_ref(), ctes),
SetExpr::SetOperation {
op,
left,
right,
all,
} => match (op, all) {
(SetOperator::Union, true) => {
let left_plan = self.set_expr_to_plan(left.as_ref(), None, ctes)?;
let right_plan = self.set_expr_to_plan(right.as_ref(), None, ctes)?;
let inputs = vec![left_plan, right_plan]
.into_iter()
.flat_map(|p| match p {
LogicalPlan::Union { inputs, .. } => inputs,
x => vec![x],
})
.collect::<Vec<_>>();
if inputs.is_empty() {
return Err(DataFusionError::Plan(format!(
"Empty UNION: {}",
set_expr
)));
}
if !inputs.iter().all(|s| s.schema() == inputs[0].schema()) {
return Err(DataFusionError::Plan(
"UNION ALL schemas are expected to be the same".to_string(),
));
}
Ok(LogicalPlan::Union {
schema: inputs[0].schema().clone(),
inputs,
alias,
})
}
_ => Err(DataFusionError::NotImplemented(format!(
"Only UNION ALL is supported, found {}",
op
))),
},
_ => Err(DataFusionError::NotImplemented(format!(
"Query {} not implemented yet",
set_expr
))),
}
}
/// Generate a logical plan from a CREATE EXTERNAL TABLE statement
pub fn external_table_to_plan(
&self,
statement: &CreateExternalTable,
) -> Result<LogicalPlan> {
let CreateExternalTable {
name,
columns,
file_type,
has_header,
location,
} = statement;
// semantic checks
match *file_type {
FileType::CSV => {
if columns.is_empty() {
return Err(DataFusionError::Plan(
"Column definitions required for CSV files. None found".into(),
));
}
}
FileType::Parquet => {
if !columns.is_empty() {
return Err(DataFusionError::Plan(
"Column definitions can not be specified for PARQUET files."
.into(),
));
}
}
FileType::NdJson => {}
};
let schema = self.build_schema(&columns)?;
Ok(LogicalPlan::CreateExternalTable {
schema: schema.to_dfschema_ref()?,
name: name.clone(),
location: location.clone(),
file_type: *file_type,
has_header: *has_header,
})
}
/// Generate a plan for EXPLAIN ... that will print out a plan
///
pub fn explain_statement_to_plan(
&self,
verbose: bool,
statement: &Statement,
) -> Result<LogicalPlan> {
let plan = self.sql_statement_to_plan(&statement)?;
let stringified_plans = vec![StringifiedPlan::new(
PlanType::LogicalPlan,
format!("{:#?}", plan),
)];
let schema = LogicalPlan::explain_schema();
let plan = Arc::new(plan);
Ok(LogicalPlan::Explain {
verbose,
plan,
stringified_plans,
schema: schema.to_dfschema_ref()?,
})
}
fn build_schema(&self, columns: &[SQLColumnDef]) -> Result<Schema> {
let mut fields = Vec::new();
for column in columns {
let data_type = self.make_data_type(&column.data_type)?;
let allow_null = column
.options
.iter()
.any(|x| x.option == ColumnOption::Null);
fields.push(Field::new(&column.name.value, data_type, allow_null));
}
Ok(Schema::new(fields))
}
/// Maps the SQL type to the corresponding Arrow `DataType`
fn make_data_type(&self, sql_type: &SQLDataType) -> Result<DataType> {
match sql_type {
SQLDataType::BigInt => Ok(DataType::Int64),
SQLDataType::Int => Ok(DataType::Int32),
SQLDataType::SmallInt => Ok(DataType::Int16),
SQLDataType::Char(_) | SQLDataType::Varchar(_) | SQLDataType::Text => {
Ok(DataType::Utf8)
}
SQLDataType::Decimal(_, _) => Ok(DataType::Float64),
SQLDataType::Float(_) => Ok(DataType::Float32),
SQLDataType::Real | SQLDataType::Double => Ok(DataType::Float64),
SQLDataType::Boolean => Ok(DataType::Boolean),
SQLDataType::Date => Ok(DataType::Date32),
SQLDataType::Time => Ok(DataType::Time64(TimeUnit::Millisecond)),
SQLDataType::Timestamp => Ok(DataType::Timestamp(TimeUnit::Nanosecond, None)),
_ => Err(DataFusionError::NotImplemented(format!(
"The SQL data type {:?} is not implemented",
sql_type
))),
}
}
fn plan_from_tables(
&self,
from: &[TableWithJoins],
ctes: &mut HashMap<String, LogicalPlan>,
) -> Result<Vec<LogicalPlan>> {
match from.len() {
0 => Ok(vec![LogicalPlanBuilder::empty(true).build()?]),
_ => from
.iter()
.map(|t| self.plan_table_with_joins(t, ctes))
.collect::<Result<Vec<_>>>(),
}
}
fn plan_table_with_joins(
&self,
t: &TableWithJoins,
ctes: &mut HashMap<String, LogicalPlan>,
) -> Result<LogicalPlan> {
let left = self.create_relation(&t.relation, ctes)?;
match t.joins.len() {
0 => Ok(left),
n => {
let mut left = self.parse_relation_join(&left, &t.joins[0], ctes)?;
for i in 1..n {
left = self.parse_relation_join(&left, &t.joins[i], ctes)?;
}
Ok(left)
}
}
}
fn parse_relation_join(
&self,
left: &LogicalPlan,
join: &Join,
ctes: &mut HashMap<String, LogicalPlan>,
) -> Result<LogicalPlan> {
let right = self.create_relation(&join.relation, ctes)?;
match &join.join_operator {
JoinOperator::LeftOuter(constraint) => {
self.parse_join(left, &right, constraint, JoinType::Left)
}
JoinOperator::RightOuter(constraint) => {
self.parse_join(left, &right, constraint, JoinType::Right)
}
JoinOperator::Inner(constraint) => {
self.parse_join(left, &right, constraint, JoinType::Inner)
}
JoinOperator::CrossJoin => self.parse_cross_join(left, &right),
other => Err(DataFusionError::NotImplemented(format!(
"Unsupported JOIN operator {:?}",
other
))),
}
}
fn parse_cross_join(
&self,
left: &LogicalPlan,
right: &LogicalPlan,
) -> Result<LogicalPlan> {
LogicalPlanBuilder::from(&left).cross_join(&right)?.build()
}
fn parse_join(
&self,
left: &LogicalPlan,
right: &LogicalPlan,
constraint: &JoinConstraint,
join_type: JoinType,
) -> Result<LogicalPlan> {
match constraint {
JoinConstraint::On(sql_expr) => {
let mut keys: Vec<(String, String)> = vec![];
let join_schema = left.schema().join(&right.schema())?;
// parse ON expression
let expr = self.sql_to_rex(sql_expr, &join_schema)?;
// extract join keys
extract_join_keys(&expr, &mut keys)?;
let left_keys: Vec<&str> =
keys.iter().map(|pair| pair.0.as_str()).collect();
let right_keys: Vec<&str> =
keys.iter().map(|pair| pair.1.as_str()).collect();
// return the logical plan representing the join
LogicalPlanBuilder::from(&left)
.join(&right, join_type, &left_keys, &right_keys)?
.build()
}
JoinConstraint::Using(idents) => {
let keys: Vec<&str> = idents.iter().map(|x| x.value.as_str()).collect();
LogicalPlanBuilder::from(&left)
.join(&right, join_type, &keys, &keys)?
.build()
}
JoinConstraint::Natural => {
// https://issues.apache.org/jira/browse/ARROW-10727
Err(DataFusionError::NotImplemented(
"NATURAL JOIN is not supported (https://issues.apache.org/jira/browse/ARROW-10727)".to_string(),
))
}
JoinConstraint::None => Err(DataFusionError::NotImplemented(
"NONE contraint is not supported".to_string(),
)),
}
}
fn create_relation(
&self,
relation: &TableFactor,
ctes: &mut HashMap<String, LogicalPlan>,
) -> Result<LogicalPlan> {
match relation {
TableFactor::Table { name, .. } => {
let table_name = name.to_string();
let cte = ctes.get(&table_name);
match (
cte,
self.schema_provider.get_table_provider(name.try_into()?),
) {
(Some(cte_plan), _) => Ok(cte_plan.clone()),
(_, Some(provider)) => {
LogicalPlanBuilder::scan(&table_name, provider, None)?.build()
}
(_, None) => Err(DataFusionError::Plan(format!(
"Table or CTE with name '{}' not found",
name
))),
}
}
TableFactor::Derived {
subquery, alias, ..
} => self.query_to_plan_with_alias(
subquery,
alias.as_ref().map(|a| a.name.value.to_string()),
ctes,
),
TableFactor::NestedJoin(table_with_joins) => {
self.plan_table_with_joins(table_with_joins, ctes)
}
            // @todo Support TableFactor::TableFunction?
_ => Err(DataFusionError::NotImplemented(format!(
"Unsupported ast node {:?} in create_relation",
relation
))),
}
}
    /// Generate a logical plan from an SQL select
fn select_to_plan(
&self,
select: &Select,
ctes: &mut HashMap<String, LogicalPlan>,
) -> Result<LogicalPlan> {
let plans = self.plan_from_tables(&select.from, ctes)?;
let plan = match &select.selection {
Some(predicate_expr) => {
// build join schema
let mut fields = vec![];
for plan in &plans {
fields.extend_from_slice(&plan.schema().fields());
}
let join_schema = DFSchema::new(fields)?;
let filter_expr = self.sql_to_rex(predicate_expr, &join_schema)?;
// look for expressions of the form `<column> = <column>`
let mut possible_join_keys = vec![];
extract_possible_join_keys(&filter_expr, &mut possible_join_keys)?;
let mut all_join_keys = vec![];
let mut left = plans[0].clone();
for right in plans.iter().skip(1) {
let left_schema = left.schema();
let right_schema = right.schema();
let mut join_keys = vec![];
for (l, r) in &possible_join_keys {
if left_schema.field_with_unqualified_name(l).is_ok()
&& right_schema.field_with_unqualified_name(r).is_ok()
{
join_keys.push((l.as_str(), r.as_str()));
} else if left_schema.field_with_unqualified_name(r).is_ok()
&& right_schema.field_with_unqualified_name(l).is_ok()
{
join_keys.push((r.as_str(), l.as_str()));
}
}
if join_keys.is_empty() {
left =
LogicalPlanBuilder::from(&left).cross_join(right)?.build()?;
} else {
let left_keys: Vec<_> =
join_keys.iter().map(|(l, _)| *l).collect();
let right_keys: Vec<_> =
join_keys.iter().map(|(_, r)| *r).collect();
let builder = LogicalPlanBuilder::from(&left);
left = builder
.join(right, JoinType::Inner, &left_keys, &right_keys)?
.build()?;
}
all_join_keys.extend_from_slice(&join_keys);
}
// remove join expressions from filter
match remove_join_expressions(&filter_expr, &all_join_keys)? {
Some(filter_expr) => {
LogicalPlanBuilder::from(&left).filter(filter_expr)?.build()
}
_ => Ok(left),
}
}
None => {
if plans.len() == 1 {
Ok(plans[0].clone())
} else {
let mut left = plans[0].clone();
for right in plans.iter().skip(1) {
left =
LogicalPlanBuilder::from(&left).cross_join(right)?.build()?;
}
Ok(left)
}
}
};
let plan = plan?;
// The SELECT expressions, with wildcards expanded.
let select_exprs = self.prepare_select_exprs(&plan, &select.projection)?;
// Optionally the HAVING expression.
let having_expr_opt = select
.having
.as_ref()
.map::<Result<Expr>, _>(|having_expr| {
let having_expr = self.sql_expr_to_logical_expr(having_expr)?;
// This step "dereferences" any aliases in the HAVING clause.
//
// This is how we support queries with HAVING expressions that
// refer to aliased columns.
//
// For example:
//
// SELECT c1 AS m FROM t HAVING m > 10;
// SELECT c1, MAX(c2) AS m FROM t GROUP BY c1 HAVING m > 10;
//
// are rewritten as, respectively:
//
// SELECT c1 AS m FROM t HAVING c1 > 10;
// SELECT c1, MAX(c2) AS m FROM t GROUP BY c1 HAVING MAX(c2) > 10;
//
let having_expr = resolve_aliases_to_exprs(
&having_expr,
&extract_aliases(&select_exprs),
)?;
Ok(having_expr)
})
.transpose()?;
// The outer expressions we will search through for
// aggregates. Aggregates may be sourced from the SELECT...
let mut aggr_expr_haystack = select_exprs.clone();
// ... or from the HAVING.
if let Some(having_expr) = &having_expr_opt {
aggr_expr_haystack.push(having_expr.clone());
}
// All of the aggregate expressions (deduplicated).
let aggr_exprs = find_aggregate_exprs(&aggr_expr_haystack);
let (plan, select_exprs_post_aggr, having_expr_post_aggr_opt) =
if !select.group_by.is_empty() || !aggr_exprs.is_empty() {
self.aggregate(
&plan,
&select_exprs,
&having_expr_opt,
&select.group_by,
aggr_exprs,
)?
} else {
if let Some(having_expr) = &having_expr_opt {
let available_columns = select_exprs
.iter()
.map(|expr| expr_as_column_expr(expr, &plan))
.collect::<Result<Vec<Expr>>>()?;
// Ensure the HAVING expression is using only columns
// provided by the SELECT.
if !can_columns_satisfy_exprs(
&available_columns,
&[having_expr.clone()],
)? {
return Err(DataFusionError::Plan(
"Having references column(s) not provided by the select"
.to_owned(),
));
}
}
(plan, select_exprs, having_expr_opt)
};
let plan = if let Some(having_expr_post_aggr) = having_expr_post_aggr_opt {
LogicalPlanBuilder::from(&plan)
.filter(having_expr_post_aggr)?
.build()?
} else {
plan
};
let plan = if select.distinct {
return LogicalPlanBuilder::from(&plan)
.aggregate(select_exprs_post_aggr, vec![])?
.build();
} else {
plan
};
self.project(&plan, select_exprs_post_aggr)
}
    /// Returns the `Expr`s corresponding to a SQL query's SELECT expressions.
///
/// Wildcards are expanded into the concrete list of columns.
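    ///
    /// For example, `SELECT *, age FROM person` expands the wildcard into one
    /// expression per column of `person`'s schema, followed by `age`.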
fn prepare_select_exprs(
&self,
plan: &LogicalPlan,
projection: &[SelectItem],
) -> Result<Vec<Expr>> {
let input_schema = plan.schema();
Ok(projection
.iter()
.map(|expr| self.sql_select_to_rex(&expr, &input_schema))
.collect::<Result<Vec<Expr>>>()?
.iter()
.flat_map(|expr| expand_wildcard(&expr, &input_schema))
.collect::<Vec<Expr>>())
}
/// Wrap a plan in a projection
fn project(&self, input: &LogicalPlan, expr: Vec<Expr>) -> Result<LogicalPlan> {
self.validate_schema_satisfies_exprs(&input.schema(), &expr)?;
LogicalPlanBuilder::from(input).project(expr)?.build()
}
fn aggregate(
&self,
input: &LogicalPlan,
select_exprs: &[Expr],
having_expr_opt: &Option<Expr>,
group_by: &[SQLExpr],
aggr_exprs: Vec<Expr>,
) -> Result<(LogicalPlan, Vec<Expr>, Option<Expr>)> {
let group_by_exprs = group_by
.iter()
.map(|e| self.sql_to_rex(e, &input.schema()))
.collect::<Result<Vec<Expr>>>()?;
let aggr_projection_exprs = group_by_exprs
.iter()
.chain(aggr_exprs.iter())
.cloned()
.collect::<Vec<Expr>>();
let plan = LogicalPlanBuilder::from(&input)
.aggregate(group_by_exprs, aggr_exprs)?
.build()?;
// After aggregation, these are all of the columns that will be
        // available to the next phases of planning.
let column_exprs_post_aggr = aggr_projection_exprs
.iter()
.map(|expr| expr_as_column_expr(expr, input))
.collect::<Result<Vec<Expr>>>()?;
// Rewrite the SELECT expression to use the columns produced by the
// aggregation.
let select_exprs_post_aggr = select_exprs
.iter()
.map(|expr| rebase_expr(expr, &aggr_projection_exprs, input))
.collect::<Result<Vec<Expr>>>()?;
if !can_columns_satisfy_exprs(&column_exprs_post_aggr, &select_exprs_post_aggr)? {
return Err(DataFusionError::Plan(
"Projection references non-aggregate values".to_owned(),
));
}
// Rewrite the HAVING expression to use the columns produced by the
// aggregation.
let having_expr_post_aggr_opt = if let Some(having_expr) = having_expr_opt {
let having_expr_post_aggr =
rebase_expr(having_expr, &aggr_projection_exprs, input)?;
if !can_columns_satisfy_exprs(
&column_exprs_post_aggr,
&[having_expr_post_aggr.clone()],
)? {
return Err(DataFusionError::Plan(
"Having references non-aggregate values".to_owned(),
));
}
Some(having_expr_post_aggr)
} else {
None
};
Ok((plan, select_exprs_post_aggr, having_expr_post_aggr_opt))
}
/// Wrap a plan in a limit
fn limit(&self, input: &LogicalPlan, limit: &Option<SQLExpr>) -> Result<LogicalPlan> {
match *limit {
Some(ref limit_expr) => {
let n = match self.sql_to_rex(&limit_expr, &input.schema())? {
Expr::Literal(ScalarValue::Int64(Some(n))) => Ok(n as usize),
_ => Err(DataFusionError::Plan(
"Unexpected expression for LIMIT clause".to_string(),
)),
}?;
LogicalPlanBuilder::from(&input).limit(n)?.build()
}
_ => Ok(input.clone()),
}
}
    /// Wrap the logical plan in a sort
fn order_by(
&self,
plan: &LogicalPlan,
order_by: &[OrderByExpr],
) -> Result<LogicalPlan> {
if order_by.is_empty() {
return Ok(plan.clone());
}
let input_schema = plan.schema();
let order_by_rex: Result<Vec<Expr>> = order_by
.iter()
.map(|e| {
Ok(Expr::Sort {
expr: Box::new(self.sql_to_rex(&e.expr, &input_schema)?),
                    // ascending by default
asc: e.asc.unwrap_or(true),
                    // nulls first by default, to be consistent with Spark
nulls_first: e.nulls_first.unwrap_or(true),
})
})
.collect();
LogicalPlanBuilder::from(&plan).sort(order_by_rex?)?.build()
}
/// Validate the schema provides all of the columns referenced in the expressions.
fn validate_schema_satisfies_exprs(
&self,
schema: &DFSchema,
exprs: &[Expr],
) -> Result<()> {
find_column_exprs(exprs)
.iter()
.try_for_each(|col| match col {
Expr::Column(name) => {
schema.field_with_unqualified_name(&name).map_err(|_| {
DataFusionError::Plan(format!(
"Invalid identifier '{}' for schema {}",
name,
schema.to_string()
))
})?;
Ok(())
}
_ => Err(DataFusionError::Internal("Not a column".to_string())),
})
}
    /// Generate a relational expression from a SQL SELECT item
fn sql_select_to_rex(&self, sql: &SelectItem, schema: &DFSchema) -> Result<Expr> {
match sql {
SelectItem::UnnamedExpr(expr) => self.sql_to_rex(expr, schema),
SelectItem::ExprWithAlias { expr, alias } => Ok(Alias(
Box::new(self.sql_to_rex(&expr, schema)?),
alias.value.clone(),
)),
SelectItem::Wildcard => Ok(Expr::Wildcard),
SelectItem::QualifiedWildcard(_) => Err(DataFusionError::NotImplemented(
"Qualified wildcards are not supported".to_string(),
)),
}
}
/// Generate a relational expression from a SQL expression
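    ///
    /// For example, the fragment `age >= 21` becomes an `Expr::BinaryExpr`
    /// comparing the column `age` with the literal `21`, after validating
    /// that `age` exists in `schema`.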
pub fn sql_to_rex(&self, sql: &SQLExpr, schema: &DFSchema) -> Result<Expr> {
let expr = self.sql_expr_to_logical_expr(sql)?;
self.validate_schema_satisfies_exprs(schema, &[expr.clone()])?;
Ok(expr)
}
fn sql_fn_arg_to_logical_expr(&self, sql: &FunctionArg) -> Result<Expr> {
match sql {
FunctionArg::Named { name: _, arg } => self.sql_expr_to_logical_expr(arg),
FunctionArg::Unnamed(value) => self.sql_expr_to_logical_expr(value),
}
}
fn sql_expr_to_logical_expr(&self, sql: &SQLExpr) -> Result<Expr> {
match sql {
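            // numeric literals are first parsed as i64, falling back to f64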
SQLExpr::Value(Value::Number(n, _)) => match n.parse::<i64>() {
Ok(n) => Ok(lit(n)),
Err(_) => Ok(lit(n.parse::<f64>().unwrap())),
},
SQLExpr::Value(Value::SingleQuotedString(ref s)) => Ok(lit(s.clone())),
SQLExpr::Value(Value::Boolean(n)) => Ok(lit(*n)),
SQLExpr::Value(Value::Null) => Ok(Expr::Literal(ScalarValue::Utf8(None))),
SQLExpr::Extract { field, expr } => Ok(Expr::ScalarFunction {
fun: functions::BuiltinScalarFunction::DatePart,
args: vec![
Expr::Literal(ScalarValue::Utf8(Some(format!("{}", field)))),
self.sql_expr_to_logical_expr(expr)?,
],
}),
SQLExpr::Value(Value::Interval {
value,
leading_field,
leading_precision,
last_field,
fractional_seconds_precision,
}) => self.sql_interval_to_literal(
value,
leading_field,
leading_precision,
last_field,
fractional_seconds_precision,
),
SQLExpr::Identifier(ref id) => {
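                // identifiers prefixed with '@' denote scalar variables,
                // e.g. `@name`; anything else is treated as a column reference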
if &id.value[0..1] == "@" {
let var_names = vec![id.value.clone()];
Ok(Expr::ScalarVariable(var_names))
} else {
Ok(Expr::Column(id.value.to_string()))
}
}
SQLExpr::CompoundIdentifier(ids) => {
let mut var_names = vec![];
for id in ids {
var_names.push(id.value.clone());
}
if &var_names[0][0..1] == "@" {
Ok(Expr::ScalarVariable(var_names))
} else {
Err(DataFusionError::NotImplemented(format!(
"Unsupported compound identifier '{:?}'",
var_names,
)))
}
}
SQLExpr::Wildcard => Ok(Expr::Wildcard),
SQLExpr::Case {
operand,
conditions,
results,
else_result,
} => {
let expr = if let Some(e) = operand {
Some(Box::new(self.sql_expr_to_logical_expr(e)?))
} else {
None
};
let when_expr = conditions
.iter()
.map(|e| self.sql_expr_to_logical_expr(e))
.collect::<Result<Vec<_>>>()?;
let then_expr = results
.iter()
.map(|e| self.sql_expr_to_logical_expr(e))
.collect::<Result<Vec<_>>>()?;
let else_expr = if let Some(e) = else_result {
Some(Box::new(self.sql_expr_to_logical_expr(e)?))
} else {
None
};
Ok(Expr::Case {
expr,
when_then_expr: when_expr
.iter()
.zip(then_expr.iter())
.map(|(w, t)| (Box::new(w.to_owned()), Box::new(t.to_owned())))
.collect(),
else_expr,
})
}
SQLExpr::Cast {
ref expr,
ref data_type,
} => Ok(Expr::Cast {
expr: Box::new(self.sql_expr_to_logical_expr(&expr)?),
data_type: convert_data_type(data_type)?,
}),
SQLExpr::TryCast {
ref expr,
ref data_type,
} => Ok(Expr::TryCast {
expr: Box::new(self.sql_expr_to_logical_expr(&expr)?),
data_type: convert_data_type(data_type)?,
}),
SQLExpr::TypedString {
ref data_type,
ref value,
} => Ok(Expr::Cast {
expr: Box::new(lit(&**value)),
data_type: convert_data_type(data_type)?,
}),
SQLExpr::IsNull(ref expr) => {
Ok(Expr::IsNull(Box::new(self.sql_expr_to_logical_expr(expr)?)))
}
SQLExpr::IsNotNull(ref expr) => Ok(Expr::IsNotNull(Box::new(
self.sql_expr_to_logical_expr(expr)?,
))),
SQLExpr::UnaryOp { ref op, ref expr } => match op {
UnaryOperator::Not => {
Ok(Expr::Not(Box::new(self.sql_expr_to_logical_expr(expr)?)))
}
UnaryOperator::Plus => Ok(self.sql_expr_to_logical_expr(expr)?),
UnaryOperator::Minus => {
match expr.as_ref() {
                        // optimization: if it's a number literal, we apply the negative
                        // operator here directly to compute the new literal.
SQLExpr::Value(Value::Number(n,_)) => match n.parse::<i64>() {
Ok(n) => Ok(lit(-n)),
Err(_) => Ok(lit(-n
.parse::<f64>()
.map_err(|_e| {
DataFusionError::Internal(format!(
"negative operator can be only applied to integer and float operands, got: {}",
n))
})?)),
},
                        // not a literal, apply the negative operator to the expression
_ => Ok(Expr::Negative(Box::new(self.sql_expr_to_logical_expr(expr)?))),
}
}
_ => Err(DataFusionError::NotImplemented(format!(
"Unsupported SQL unary operator {:?}",
op
))),
},
SQLExpr::Between {
ref expr,
ref negated,
ref low,
ref high,
} => Ok(Expr::Between {
expr: Box::new(self.sql_expr_to_logical_expr(&expr)?),
negated: *negated,
low: Box::new(self.sql_expr_to_logical_expr(&low)?),
high: Box::new(self.sql_expr_to_logical_expr(&high)?),
}),
SQLExpr::InList {
ref expr,
ref list,
ref negated,
} => {
let list_expr = list
.iter()
.map(|e| self.sql_expr_to_logical_expr(e))
.collect::<Result<Vec<_>>>()?;
Ok(Expr::InList {
expr: Box::new(self.sql_expr_to_logical_expr(&expr)?),
list: list_expr,
negated: *negated,
})
}
SQLExpr::BinaryOp {
ref left,
ref op,
ref right,
} => {
let operator = match *op {
BinaryOperator::Gt => Ok(Operator::Gt),
BinaryOperator::GtEq => Ok(Operator::GtEq),
BinaryOperator::Lt => Ok(Operator::Lt),
BinaryOperator::LtEq => Ok(Operator::LtEq),
BinaryOperator::Eq => Ok(Operator::Eq),
BinaryOperator::NotEq => Ok(Operator::NotEq),
BinaryOperator::Plus => Ok(Operator::Plus),
BinaryOperator::Minus => Ok(Operator::Minus),
BinaryOperator::Multiply => Ok(Operator::Multiply),
BinaryOperator::Divide => Ok(Operator::Divide),
BinaryOperator::Modulus => Ok(Operator::Modulus),
BinaryOperator::And => Ok(Operator::And),
BinaryOperator::Or => Ok(Operator::Or),
BinaryOperator::Like => Ok(Operator::Like),
BinaryOperator::NotLike => Ok(Operator::NotLike),
_ => Err(DataFusionError::NotImplemented(format!(
"Unsupported SQL binary operator {:?}",
op
))),
}?;
Ok(Expr::BinaryExpr {
left: Box::new(self.sql_expr_to_logical_expr(&left)?),
op: operator,
right: Box::new(self.sql_expr_to_logical_expr(&right)?),
})
}
SQLExpr::Function(function) => {
let name = if function.name.0.len() > 1 {
// DF doesn't handle compound identifiers
// (e.g. "foo.bar") for function names yet
function.name.to_string()
} else {
// if there is a quote style, then don't normalize
// the name, otherwise normalize to lowercase
let ident = &function.name.0[0];
match ident.quote_style {
Some(_) => ident.value.clone(),
None => ident.value.to_ascii_lowercase(),
}
};
// first, scalar built-in
if let Ok(fun) = functions::BuiltinScalarFunction::from_str(&name) {
let args = function
.args
.iter()
.map(|a| self.sql_fn_arg_to_logical_expr(a))
.collect::<Result<Vec<Expr>>>()?;
return Ok(Expr::ScalarFunction { fun, args });
};
// next, aggregate built-ins
if let Ok(fun) = aggregates::AggregateFunction::from_str(&name) {
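                    // COUNT(*) and COUNT(<numeric literal>) are both rewritten
                    // as COUNT(UInt8(1)), as asserted in the tests below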
let args = if fun == aggregates::AggregateFunction::Count {
function
.args
.iter()
.map(|a| match a {
FunctionArg::Unnamed(SQLExpr::Value(Value::Number(
_,
_,
))) => Ok(lit(1_u8)),
FunctionArg::Unnamed(SQLExpr::Wildcard) => Ok(lit(1_u8)),
_ => self.sql_fn_arg_to_logical_expr(a),
})
.collect::<Result<Vec<Expr>>>()?
} else {
function
.args
.iter()
.map(|a| self.sql_fn_arg_to_logical_expr(a))
.collect::<Result<Vec<Expr>>>()?
};
return Ok(Expr::AggregateFunction {
fun,
distinct: function.distinct,
args,
});
};
// finally, user-defined functions (UDF) and UDAF
match self.schema_provider.get_function_meta(&name) {
Some(fm) => {
let args = function
.args
.iter()
.map(|a| self.sql_fn_arg_to_logical_expr(a))
.collect::<Result<Vec<Expr>>>()?;
Ok(Expr::ScalarUDF { fun: fm, args })
}
None => match self.schema_provider.get_aggregate_meta(&name) {
Some(fm) => {
let args = function
.args
.iter()
.map(|a| self.sql_fn_arg_to_logical_expr(a))
.collect::<Result<Vec<Expr>>>()?;
Ok(Expr::AggregateUDF { fun: fm, args })
}
_ => Err(DataFusionError::Plan(format!(
"Invalid function '{}'",
name
))),
},
}
}
SQLExpr::Nested(e) => self.sql_expr_to_logical_expr(&e),
_ => Err(DataFusionError::NotImplemented(format!(
"Unsupported ast node {:?} in sqltorel",
sql
))),
}
}
fn sql_interval_to_literal(
&self,
value: &str,
leading_field: &Option<DateTimeField>,
leading_precision: &Option<u64>,
last_field: &Option<DateTimeField>,
fractional_seconds_precision: &Option<u64>,
) -> Result<Expr> {
if leading_field.is_some() {
return Err(DataFusionError::NotImplemented(format!(
"Unsupported Interval Expression with leading_field {:?}",
leading_field
)));
}
if leading_precision.is_some() {
return Err(DataFusionError::NotImplemented(format!(
"Unsupported Interval Expression with leading_precision {:?}",
leading_precision
)));
}
if last_field.is_some() {
return Err(DataFusionError::NotImplemented(format!(
"Unsupported Interval Expression with last_field {:?}",
last_field
)));
}
if fractional_seconds_precision.is_some() {
return Err(DataFusionError::NotImplemented(format!(
"Unsupported Interval Expression with fractional_seconds_precision {:?}",
fractional_seconds_precision
)));
}
const SECONDS_PER_HOUR: f32 = 3_600_f32;
const MILLIS_PER_SECOND: f32 = 1_000_f32;
        // We store the parts as integers, which is why the fractional parts
        // must be aligned, e.g.:
        // INTERVAL '0.5 MONTH' = 15 days, INTERVAL '1.5 MONTH' = 1 month 15 days
        // INTERVAL '0.5 DAY' = 12 hours, INTERVAL '1.5 DAY' = 1 day 12 hours
        let align_interval_parts = |month_part: f32,
                                    mut day_part: f32,
                                    mut millis_part: f32|
         -> (i32, i32, f32) {
            // Convert the fractional month part to days. Fractional months are
            // not supported by the Arrow types, so a 30-day month is assumed.
            day_part += (month_part - (month_part as i32) as f32) * 30_f32;
            // Convert the fractional day part to milliseconds
            millis_part += (day_part - ((day_part as i32) as f32))
                * 24_f32
                * SECONDS_PER_HOUR
                * MILLIS_PER_SECOND;
            (month_part as i32, day_part as i32, millis_part)
        };
let calculate_from_part = |interval_period_str: &str,
interval_type: &str|
-> Result<(i32, i32, f32)> {
            // @todo It would be better to use Decimal here to guard against rounding errors
            // Waiting on https://github.com/apache/arrow/pull/9232
let interval_period = match f32::from_str(interval_period_str) {
Ok(n) => n,
Err(_) => {
return Err(DataFusionError::SQL(ParserError(format!(
"Unsupported Interval Expression with value {:?}",
value
))))
}
};
if interval_period > (i32::MAX as f32) {
return Err(DataFusionError::NotImplemented(format!(
"Interval field value out of range: {:?}",
value
)));
}
match interval_type.to_lowercase().as_str() {
"year" => Ok(align_interval_parts(interval_period * 12_f32, 0.0, 0.0)),
"month" => Ok(align_interval_parts(interval_period, 0.0, 0.0)),
"day" | "days" => Ok(align_interval_parts(0.0, interval_period, 0.0)),
"hour" | "hours" => {
Ok((0, 0, interval_period * SECONDS_PER_HOUR * MILLIS_PER_SECOND))
}
"minutes" | "minute" => {
Ok((0, 0, interval_period * 60_f32 * MILLIS_PER_SECOND))
}
"seconds" | "second" => Ok((0, 0, interval_period * MILLIS_PER_SECOND)),
"milliseconds" | "millisecond" => Ok((0, 0, interval_period)),
_ => Err(DataFusionError::NotImplemented(format!(
"Invalid input syntax for type interval: {:?}",
value
))),
}
};
let mut result_month: i64 = 0;
let mut result_days: i64 = 0;
let mut result_millis: i64 = 0;
let mut parts = value.split_whitespace();
        while let Some(interval_period_str) = parts.next() {
            let (diff_month, diff_days, diff_millis) = calculate_from_part(
                interval_period_str,
                parts.next().unwrap_or("second"),
            )?;
result_month += diff_month as i64;
if result_month > (i32::MAX as i64) {
return Err(DataFusionError::NotImplemented(format!(
"Interval field value out of range: {:?}",
value
)));
}
result_days += diff_days as i64;
if result_days > (i32::MAX as i64) {
return Err(DataFusionError::NotImplemented(format!(
"Interval field value out of range: {:?}",
value
)));
}
result_millis += diff_millis as i64;
if result_millis > (i32::MAX as i64) {
return Err(DataFusionError::NotImplemented(format!(
"Interval field value out of range: {:?}",
value
)));
}
}
        // Intervals are tricky:
        // 1 day is not always 24 hours (time zones), 1 year != 365/364 days, and 30 days != 1 month.
        // The correct way to store and compute intervals is to keep them exactly as defined.
        // Since Arrow supports only the two types YearMonth (months) and DayTime (days, time),
        // it is not possible to store such combined intervals.
        // As a workaround, use e.g. SELECT (NOW() + INTERVAL '1 year') + INTERVAL '1 day';
if result_month != 0 && (result_days != 0 || result_millis != 0) {
return Err(DataFusionError::NotImplemented(format!(
"DF does not support intervals that have both a Year/Month part as well as Days/Hours/Mins/Seconds: {:?}. Hint: try breaking the interval into two parts, one with Year/Month and the other with Days/Hours/Mins/Seconds - e.g. (NOW() + INTERVAL '1 year') + INTERVAL '1 day'",
value
)));
}
if result_month != 0 {
return Ok(Expr::Literal(ScalarValue::IntervalYearMonth(Some(
result_month as i32,
))));
}
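        // Arrow's IntervalDayTime encoding packs the day count into the upper
        // 32 bits and the millisecond count into the lower 32 bits,
        // e.g. 1 day 500 ms => (1_i64 << 32) | 500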
let result: i64 = (result_days << 32) | result_millis;
Ok(Expr::Literal(ScalarValue::IntervalDayTime(Some(result))))
}
fn show_variable_to_plan(&self, variable: &[Ident]) -> Result<LogicalPlan> {
// Special case SHOW TABLES
let variable = ObjectName(variable.to_vec()).to_string();
if variable.as_str().eq_ignore_ascii_case("tables") {
if self.has_table("information_schema", "tables") {
let rewrite =
DFParser::parse_sql("SELECT * FROM information_schema.tables;")?;
self.statement_to_plan(&rewrite[0])
} else {
Err(DataFusionError::Plan(
"SHOW TABLES is not supported unless information_schema is enabled"
.to_string(),
))
}
} else {
Err(DataFusionError::NotImplemented(format!(
"SHOW {} not implemented. Supported syntax: SHOW <TABLES>",
variable
)))
}
}
fn show_columns_to_plan(
&self,
extended: bool,
full: bool,
table_name: &ObjectName,
filter: Option<&ShowStatementFilter>,
) -> Result<LogicalPlan> {
if filter.is_some() {
return Err(DataFusionError::Plan(
"SHOW COLUMNS with WHERE or LIKE is not supported".to_string(),
));
}
if !self.has_table("information_schema", "columns") {
return Err(DataFusionError::Plan(
"SHOW COLUMNS is not supported unless information_schema is enabled"
.to_string(),
));
}
if self
.schema_provider
.get_table_provider(table_name.try_into()?)
.is_none()
{
return Err(DataFusionError::Plan(format!(
"Unknown relation for SHOW COLUMNS: {}",
table_name
)));
}
// Figure out the where clause
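        // e.g. `SHOW COLUMNS FROM foo.bar` yields the clause
        //     table_name = 'bar' AND table_schema = 'foo'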
let columns = vec!["table_name", "table_schema", "table_catalog"].into_iter();
let where_clause = table_name
.0
.iter()
.rev()
.zip(columns)
.map(|(ident, column_name)| {
format!(r#"{} = '{}'"#, column_name, ident.to_string())
})
.collect::<Vec<_>>()
.join(" AND ");
// treat both FULL and EXTENDED as the same
let select_list = if full || extended {
"*"
} else {
"table_catalog, table_schema, table_name, column_name, data_type, is_nullable"
};
let query = format!(
"SELECT {} FROM information_schema.columns WHERE {}",
select_list, where_clause
);
let rewrite = DFParser::parse_sql(&query)?;
self.statement_to_plan(&rewrite[0])
}
/// Return true if there is a table provider available for "schema.table"
fn has_table(&self, schema: &str, table: &str) -> bool {
let tables_reference = TableReference::Partial { schema, table };
self.schema_provider
.get_table_provider(tables_reference)
.is_some()
}
}
/// Remove join expressions from a filter expression
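///
/// e.g. with join columns `[("id", "customer_id")]`, the filter
/// `id = customer_id AND age > 21` is reduced to `age > 21`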
fn remove_join_expressions(
expr: &Expr,
join_columns: &[(&str, &str)],
) -> Result<Option<Expr>> {
match expr {
Expr::BinaryExpr { left, op, right } => match op {
Operator::Eq => match (left.as_ref(), right.as_ref()) {
(Expr::Column(l), Expr::Column(r)) => {
if join_columns.contains(&(l, r)) || join_columns.contains(&(r, l)) {
Ok(None)
} else {
Ok(Some(expr.clone()))
}
}
_ => Ok(Some(expr.clone())),
},
Operator::And => {
let l = remove_join_expressions(left, join_columns)?;
let r = remove_join_expressions(right, join_columns)?;
match (l, r) {
(Some(ll), Some(rr)) => Ok(Some(and(ll, rr))),
(Some(ll), _) => Ok(Some(ll)),
(_, Some(rr)) => Ok(Some(rr)),
_ => Ok(None),
}
}
_ => Ok(Some(expr.clone())),
},
_ => Ok(Some(expr.clone())),
}
}
/// Parse an equijoin ON condition, which could be a single Eq or multiple conjunctive Eqs
///
/// Examples
///
/// foo = bar
/// foo = bar AND bar = baz AND ...
///
fn extract_join_keys(expr: &Expr, accum: &mut Vec<(String, String)>) -> Result<()> {
match expr {
Expr::BinaryExpr { left, op, right } => match op {
Operator::Eq => match (left.as_ref(), right.as_ref()) {
(Expr::Column(l), Expr::Column(r)) => {
accum.push((l.to_owned(), r.to_owned()));
Ok(())
}
other => Err(DataFusionError::SQL(ParserError(format!(
"Unsupported expression '{:?}' in JOIN condition",
other
)))),
},
Operator::And => {
extract_join_keys(left, accum)?;
extract_join_keys(right, accum)
}
other => Err(DataFusionError::SQL(ParserError(format!(
"Unsupported expression '{:?}' in JOIN condition",
other
)))),
},
other => Err(DataFusionError::SQL(ParserError(format!(
"Unsupported expression '{:?}' in JOIN condition",
other
)))),
}
}
/// Extract join keys from a WHERE clause
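///
/// Unlike `extract_join_keys`, predicates that are not `<column> = <column>`
/// conjuncts are silently ignored here, since they remain in the filter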
fn extract_possible_join_keys(
expr: &Expr,
accum: &mut Vec<(String, String)>,
) -> Result<()> {
match expr {
Expr::BinaryExpr { left, op, right } => match op {
Operator::Eq => match (left.as_ref(), right.as_ref()) {
(Expr::Column(l), Expr::Column(r)) => {
accum.push((l.to_owned(), r.to_owned()));
Ok(())
}
_ => Ok(()),
},
Operator::And => {
extract_possible_join_keys(left, accum)?;
extract_possible_join_keys(right, accum)
}
_ => Ok(()),
},
_ => Ok(()),
}
}
/// Convert a SQL data type to the relational representation of the data type
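///
/// e.g. `VARCHAR(100)` maps to `DataType::Utf8` and `TIMESTAMP` to
/// `DataType::Timestamp(TimeUnit::Nanosecond, None)`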
pub fn convert_data_type(sql: &SQLDataType) -> Result<DataType> {
match sql {
SQLDataType::Boolean => Ok(DataType::Boolean),
SQLDataType::SmallInt => Ok(DataType::Int16),
SQLDataType::Int => Ok(DataType::Int32),
SQLDataType::BigInt => Ok(DataType::Int64),
SQLDataType::Float(_) | SQLDataType::Real => Ok(DataType::Float64),
SQLDataType::Double => Ok(DataType::Float64),
SQLDataType::Char(_) | SQLDataType::Varchar(_) => Ok(DataType::Utf8),
SQLDataType::Timestamp => Ok(DataType::Timestamp(TimeUnit::Nanosecond, None)),
SQLDataType::Date => Ok(DataType::Date32),
other => Err(DataFusionError::NotImplemented(format!(
"Unsupported SQL type {:?}",
other
))),
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::datasource::empty::EmptyTable;
use crate::{logical_plan::create_udf, sql::parser::DFParser};
use functions::ScalarFunctionImplementation;
const PERSON_COLUMN_NAMES: &str =
"id, first_name, last_name, age, state, salary, birth_date";
#[test]
fn select_no_relation() {
quick_test(
"SELECT 1",
"Projection: Int64(1)\
\n EmptyRelation",
);
}
#[test]
fn select_column_does_not_exist() {
let sql = "SELECT doesnotexist FROM person";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
format!(
"Plan(\"Invalid identifier \\\'doesnotexist\\\' for schema {}\")",
PERSON_COLUMN_NAMES
),
format!("{:?}", err)
);
}
#[test]
fn select_repeated_column() {
let sql = "SELECT age, age FROM person";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"Plan(\"Projections require unique expression names but the expression \\\"#age\\\" at position 0 and \\\"#age\\\" at position 1 have the same name. Consider aliasing (\\\"AS\\\") one of them.\")",
format!("{:?}", err)
);
}
#[test]
fn select_wildcard_with_repeated_column() {
let sql = "SELECT *, age FROM person";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"Plan(\"Projections require unique expression names but the expression \\\"#age\\\" at position 3 and \\\"#age\\\" at position 7 have the same name. Consider aliasing (\\\"AS\\\") one of them.\")",
format!("{:?}", err)
);
}
#[test]
fn select_wildcard_with_repeated_column_but_is_aliased() {
quick_test(
"SELECT *, first_name AS fn from person",
"Projection: #id, #first_name, #last_name, #age, #state, #salary, #birth_date, #first_name AS fn\
\n TableScan: person projection=None",
);
}
#[test]
fn select_scalar_func_with_literal_no_relation() {
quick_test(
"SELECT sqrt(9)",
"Projection: sqrt(Int64(9))\
\n EmptyRelation",
);
}
#[test]
fn select_simple_filter() {
let sql = "SELECT id, first_name, last_name \
FROM person WHERE state = 'CO'";
let expected = "Projection: #id, #first_name, #last_name\
\n Filter: #state Eq Utf8(\"CO\")\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_filter_column_does_not_exist() {
let sql = "SELECT first_name FROM person WHERE doesnotexist = 'A'";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
format!(
"Plan(\"Invalid identifier \\\'doesnotexist\\\' for schema {}\")",
PERSON_COLUMN_NAMES
),
format!("{:?}", err)
);
}
#[test]
fn select_filter_cannot_use_alias() {
let sql = "SELECT first_name AS x FROM person WHERE x = 'A'";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
format!(
"Plan(\"Invalid identifier \\\'x\\\' for schema {}\")",
PERSON_COLUMN_NAMES
),
format!("{:?}", err)
);
}
#[test]
fn select_neg_filter() {
let sql = "SELECT id, first_name, last_name \
FROM person WHERE NOT state";
let expected = "Projection: #id, #first_name, #last_name\
\n Filter: NOT #state\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_compound_filter() {
let sql = "SELECT id, first_name, last_name \
FROM person WHERE state = 'CO' AND age >= 21 AND age <= 65";
let expected = "Projection: #id, #first_name, #last_name\
\n Filter: #state Eq Utf8(\"CO\") And #age GtEq Int64(21) And #age LtEq Int64(65)\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn test_timestamp_filter() {
let sql =
"SELECT state FROM person WHERE birth_date < CAST (158412331400600000 as timestamp)";
let expected = "Projection: #state\
\n Filter: #birth_date Lt CAST(Int64(158412331400600000) AS Timestamp(Nanosecond, None))\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn test_date_filter() {
let sql =
"SELECT state FROM person WHERE birth_date < CAST ('2020-01-01' as date)";
let expected = "Projection: #state\
\n Filter: #birth_date Lt CAST(Utf8(\"2020-01-01\") AS Date32)\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_all_boolean_operators() {
let sql = "SELECT age, first_name, last_name \
FROM person \
WHERE age = 21 \
AND age != 21 \
AND age > 21 \
AND age >= 21 \
AND age < 65 \
AND age <= 65";
let expected = "Projection: #age, #first_name, #last_name\
\n Filter: #age Eq Int64(21) \
And #age NotEq Int64(21) \
And #age Gt Int64(21) \
And #age GtEq Int64(21) \
And #age Lt Int64(65) \
And #age LtEq Int64(65)\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_between() {
let sql = "SELECT state FROM person WHERE age BETWEEN 21 AND 65";
let expected = "Projection: #state\
\n Filter: #age BETWEEN Int64(21) AND Int64(65)\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_between_negated() {
let sql = "SELECT state FROM person WHERE age NOT BETWEEN 21 AND 65";
let expected = "Projection: #state\
\n Filter: #age NOT BETWEEN Int64(21) AND Int64(65)\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_nested() {
let sql = "SELECT fn2, last_name
FROM (
SELECT fn1 as fn2, last_name, birth_date
FROM (
SELECT first_name AS fn1, last_name, birth_date, age
FROM person
)
)";
let expected = "Projection: #fn2, #last_name\
\n Projection: #fn1 AS fn2, #last_name, #birth_date\
\n Projection: #first_name AS fn1, #last_name, #birth_date, #age\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_nested_with_filters() {
let sql = "SELECT fn1, age
FROM (
SELECT first_name AS fn1, age
FROM person
WHERE age > 20
)
WHERE fn1 = 'X' AND age < 30";
let expected = "Projection: #fn1, #age\
\n Filter: #fn1 Eq Utf8(\"X\") And #age Lt Int64(30)\
\n Projection: #first_name AS fn1, #age\
\n Filter: #age Gt Int64(20)\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_with_having() {
let sql = "SELECT id, age
FROM person
HAVING age > 100 AND age < 200";
let expected = "Projection: #id, #age\
\n Filter: #age Gt Int64(100) And #age Lt Int64(200)\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_with_having_referencing_column_not_in_select() {
let sql = "SELECT id, age
FROM person
HAVING first_name = 'M'";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"Plan(\"Having references column(s) not provided by the select\")",
format!("{:?}", err)
);
}
#[test]
fn select_with_having_referencing_column_nested_in_select_expression() {
let sql = "SELECT id, age + 1
FROM person
HAVING age > 100";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"Plan(\"Having references column(s) not provided by the select\")",
format!("{:?}", err)
);
}
#[test]
fn select_with_having_with_aggregate_not_in_select() {
let sql = "SELECT first_name
FROM person
HAVING MAX(age) > 100";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"Plan(\"Projection references non-aggregate values\")",
format!("{:?}", err)
);
}
#[test]
fn select_aggregate_with_having_that_reuses_aggregate() {
let sql = "SELECT MAX(age)
FROM person
HAVING MAX(age) < 30";
let expected = "Projection: #MAX(age)\
\n Filter: #MAX(age) Lt Int64(30)\
\n Aggregate: groupBy=[[]], aggr=[[MAX(#age)]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_aggregate_with_having_with_aggregate_not_in_select() {
let sql = "SELECT MAX(age)
FROM person
HAVING MAX(first_name) > 'M'";
let expected = "Projection: #MAX(age)\
\n Filter: #MAX(first_name) Gt Utf8(\"M\")\
\n Aggregate: groupBy=[[]], aggr=[[MAX(#age), MAX(#first_name)]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_aggregate_with_having_referencing_column_not_in_select() {
let sql = "SELECT COUNT(*)
FROM person
HAVING first_name = 'M'";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"Plan(\"Having references non-aggregate values\")",
format!("{:?}", err)
);
}
#[test]
fn select_aggregate_aliased_with_having_referencing_aggregate_by_its_alias() {
let sql = "SELECT MAX(age) as max_age
FROM person
HAVING max_age < 30";
let expected = "Projection: #MAX(age) AS max_age\
\n Filter: #MAX(age) Lt Int64(30)\
\n Aggregate: groupBy=[[]], aggr=[[MAX(#age)]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_aggregate_aliased_with_having_that_reuses_aggregate_but_not_by_its_alias() {
let sql = "SELECT MAX(age) as max_age
FROM person
HAVING MAX(age) < 30";
let expected = "Projection: #MAX(age) AS max_age\
\n Filter: #MAX(age) Lt Int64(30)\
\n Aggregate: groupBy=[[]], aggr=[[MAX(#age)]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_aggregate_with_group_by_with_having() {
let sql = "SELECT first_name, MAX(age)
FROM person
GROUP BY first_name
HAVING first_name = 'M'";
let expected = "Projection: #first_name, #MAX(age)\
\n Filter: #first_name Eq Utf8(\"M\")\
\n Aggregate: groupBy=[[#first_name]], aggr=[[MAX(#age)]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_aggregate_with_group_by_with_having_and_where() {
let sql = "SELECT first_name, MAX(age)
FROM person
WHERE id > 5
GROUP BY first_name
HAVING MAX(age) < 100";
let expected = "Projection: #first_name, #MAX(age)\
\n Filter: #MAX(age) Lt Int64(100)\
\n Aggregate: groupBy=[[#first_name]], aggr=[[MAX(#age)]]\
\n Filter: #id Gt Int64(5)\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_aggregate_with_group_by_with_having_and_where_filtering_on_aggregate_column(
) {
let sql = "SELECT first_name, MAX(age)
FROM person
WHERE id > 5 AND age > 18
GROUP BY first_name
HAVING MAX(age) < 100";
let expected = "Projection: #first_name, #MAX(age)\
\n Filter: #MAX(age) Lt Int64(100)\
\n Aggregate: groupBy=[[#first_name]], aggr=[[MAX(#age)]]\
\n Filter: #id Gt Int64(5) And #age Gt Int64(18)\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_aggregate_with_group_by_with_having_using_column_by_alias() {
let sql = "SELECT first_name AS fn, MAX(age)
FROM person
GROUP BY first_name
HAVING MAX(age) > 2 AND fn = 'M'";
let expected = "Projection: #first_name AS fn, #MAX(age)\
\n Filter: #MAX(age) Gt Int64(2) And #first_name Eq Utf8(\"M\")\
\n Aggregate: groupBy=[[#first_name]], aggr=[[MAX(#age)]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_aggregate_with_group_by_with_having_using_columns_with_and_without_their_aliases(
) {
let sql = "SELECT first_name AS fn, MAX(age) AS max_age
FROM person
GROUP BY first_name
HAVING MAX(age) > 2 AND max_age < 5 AND first_name = 'M' AND fn = 'N'";
let expected = "Projection: #first_name AS fn, #MAX(age) AS max_age\
\n Filter: #MAX(age) Gt Int64(2) And #MAX(age) Lt Int64(5) And #first_name Eq Utf8(\"M\") And #first_name Eq Utf8(\"N\")\
\n Aggregate: groupBy=[[#first_name]], aggr=[[MAX(#age)]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_aggregate_with_group_by_with_having_that_reuses_aggregate() {
let sql = "SELECT first_name, MAX(age)
FROM person
GROUP BY first_name
HAVING MAX(age) > 100";
let expected = "Projection: #first_name, #MAX(age)\
\n Filter: #MAX(age) Gt Int64(100)\
\n Aggregate: groupBy=[[#first_name]], aggr=[[MAX(#age)]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_aggregate_with_group_by_with_having_referencing_column_not_in_group_by() {
let sql = "SELECT first_name, MAX(age)
FROM person
GROUP BY first_name
HAVING MAX(age) > 10 AND last_name = 'M'";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"Plan(\"Having references non-aggregate values\")",
format!("{:?}", err)
);
}
#[test]
fn select_aggregate_with_group_by_with_having_that_reuses_aggregate_multiple_times() {
let sql = "SELECT first_name, MAX(age)
FROM person
GROUP BY first_name
HAVING MAX(age) > 100 AND MAX(age) < 200";
let expected = "Projection: #first_name, #MAX(age)\
\n Filter: #MAX(age) Gt Int64(100) And #MAX(age) Lt Int64(200)\
\n Aggregate: groupBy=[[#first_name]], aggr=[[MAX(#age)]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
    fn select_aggregate_with_group_by_with_having_using_aggregate_not_in_select() {
let sql = "SELECT first_name, MAX(age)
FROM person
GROUP BY first_name
HAVING MAX(age) > 100 AND MIN(id) < 50";
let expected = "Projection: #first_name, #MAX(age)\
\n Filter: #MAX(age) Gt Int64(100) And #MIN(id) Lt Int64(50)\
\n Aggregate: groupBy=[[#first_name]], aggr=[[MAX(#age), MIN(#id)]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_aggregate_aliased_with_group_by_with_having_referencing_aggregate_by_its_alias(
) {
let sql = "SELECT first_name, MAX(age) AS max_age
FROM person
GROUP BY first_name
HAVING max_age > 100";
let expected = "Projection: #first_name, #MAX(age) AS max_age\
\n Filter: #MAX(age) Gt Int64(100)\
\n Aggregate: groupBy=[[#first_name]], aggr=[[MAX(#age)]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_aggregate_compound_aliased_with_group_by_with_having_referencing_compound_aggregate_by_its_alias(
) {
let sql = "SELECT first_name, MAX(age) + 1 AS max_age_plus_one
FROM person
GROUP BY first_name
HAVING max_age_plus_one > 100";
let expected =
"Projection: #first_name, #MAX(age) Plus Int64(1) AS max_age_plus_one\
\n Filter: #MAX(age) Plus Int64(1) Gt Int64(100)\
\n Aggregate: groupBy=[[#first_name]], aggr=[[MAX(#age)]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
    fn select_aggregate_with_group_by_with_having_using_derived_column_aggregate_not_in_select(
) {
let sql = "SELECT first_name, MAX(age)
FROM person
GROUP BY first_name
HAVING MAX(age) > 100 AND MIN(id - 2) < 50";
let expected = "Projection: #first_name, #MAX(age)\
\n Filter: #MAX(age) Gt Int64(100) And #MIN(id Minus Int64(2)) Lt Int64(50)\
\n Aggregate: groupBy=[[#first_name]], aggr=[[MAX(#age), MIN(#id Minus Int64(2))]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_aggregate_with_group_by_with_having_using_count_star_not_in_select() {
let sql = "SELECT first_name, MAX(age)
FROM person
GROUP BY first_name
HAVING MAX(age) > 100 AND COUNT(*) < 50";
let expected = "Projection: #first_name, #MAX(age)\
\n Filter: #MAX(age) Gt Int64(100) And #COUNT(UInt8(1)) Lt Int64(50)\
\n Aggregate: groupBy=[[#first_name]], aggr=[[MAX(#age), COUNT(UInt8(1))]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_binary_expr() {
let sql = "SELECT age + salary from person";
let expected = "Projection: #age Plus #salary\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_binary_expr_nested() {
let sql = "SELECT (age + salary)/2 from person";
let expected = "Projection: #age Plus #salary Divide Int64(2)\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_wildcard_with_groupby() {
quick_test(
"SELECT * FROM person GROUP BY id, first_name, last_name, age, state, salary, birth_date",
"Projection: #id, #first_name, #last_name, #age, #state, #salary, #birth_date\
\n Aggregate: groupBy=[[#id, #first_name, #last_name, #age, #state, #salary, #birth_date]], aggr=[[]]\
\n TableScan: person projection=None",
);
quick_test(
"SELECT * FROM (SELECT first_name, last_name FROM person) GROUP BY first_name, last_name",
"Projection: #first_name, #last_name\
\n Aggregate: groupBy=[[#first_name, #last_name]], aggr=[[]]\
\n Projection: #first_name, #last_name\
\n TableScan: person projection=None",
);
}
#[test]
fn select_simple_aggregate() {
quick_test(
"SELECT MIN(age) FROM person",
"Projection: #MIN(age)\
\n Aggregate: groupBy=[[]], aggr=[[MIN(#age)]]\
\n TableScan: person projection=None",
);
}
#[test]
fn test_sum_aggregate() {
quick_test(
"SELECT SUM(age) from person",
"Projection: #SUM(age)\
\n Aggregate: groupBy=[[]], aggr=[[SUM(#age)]]\
\n TableScan: person projection=None",
);
}
#[test]
fn select_simple_aggregate_column_does_not_exist() {
let sql = "SELECT MIN(doesnotexist) FROM person";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
format!(
"Plan(\"Invalid identifier \\\'doesnotexist\\\' for schema {}\")",
PERSON_COLUMN_NAMES
),
format!("{:?}", err)
);
}
#[test]
fn select_simple_aggregate_repeated_aggregate() {
let sql = "SELECT MIN(age), MIN(age) FROM person";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"Plan(\"Projections require unique expression names but the expression \\\"#MIN(age)\\\" at position 0 and \\\"#MIN(age)\\\" at position 1 have the same name. Consider aliasing (\\\"AS\\\") one of them.\")",
format!("{:?}", err)
);
}
#[test]
fn select_simple_aggregate_repeated_aggregate_with_single_alias() {
quick_test(
"SELECT MIN(age), MIN(age) AS a FROM person",
"Projection: #MIN(age), #MIN(age) AS a\
\n Aggregate: groupBy=[[]], aggr=[[MIN(#age)]]\
\n TableScan: person projection=None",
);
}
#[test]
fn select_simple_aggregate_repeated_aggregate_with_unique_aliases() {
quick_test(
"SELECT MIN(age) AS a, MIN(age) AS b FROM person",
"Projection: #MIN(age) AS a, #MIN(age) AS b\
\n Aggregate: groupBy=[[]], aggr=[[MIN(#age)]]\
\n TableScan: person projection=None",
);
}
#[test]
fn select_simple_aggregate_repeated_aggregate_with_repeated_aliases() {
let sql = "SELECT MIN(age) AS a, MIN(age) AS a FROM person";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"Plan(\"Projections require unique expression names but the expression \\\"#MIN(age) AS a\\\" at position 0 and \\\"#MIN(age) AS a\\\" at position 1 have the same name. Consider aliasing (\\\"AS\\\") one of them.\")",
format!("{:?}", err)
);
}
#[test]
fn select_simple_aggregate_with_groupby() {
quick_test(
"SELECT state, MIN(age), MAX(age) FROM person GROUP BY state",
"Projection: #state, #MIN(age), #MAX(age)\
\n Aggregate: groupBy=[[#state]], aggr=[[MIN(#age), MAX(#age)]]\
\n TableScan: person projection=None",
);
}
#[test]
fn select_simple_aggregate_with_groupby_with_aliases() {
quick_test(
"SELECT state AS a, MIN(age) AS b FROM person GROUP BY state",
"Projection: #state AS a, #MIN(age) AS b\
\n Aggregate: groupBy=[[#state]], aggr=[[MIN(#age)]]\
\n TableScan: person projection=None",
);
}
#[test]
fn select_simple_aggregate_with_groupby_with_aliases_repeated() {
let sql = "SELECT state AS a, MIN(age) AS a FROM person GROUP BY state";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"Plan(\"Projections require unique expression names but the expression \\\"#state AS a\\\" at position 0 and \\\"#MIN(age) AS a\\\" at position 1 have the same name. Consider aliasing (\\\"AS\\\") one of them.\")",
format!("{:?}", err)
);
}
#[test]
fn select_simple_aggregate_with_groupby_column_unselected() {
quick_test(
"SELECT MIN(age), MAX(age) FROM person GROUP BY state",
"Projection: #MIN(age), #MAX(age)\
\n Aggregate: groupBy=[[#state]], aggr=[[MIN(#age), MAX(#age)]]\
\n TableScan: person projection=None",
);
}
#[test]
fn select_simple_aggregate_with_groupby_and_column_in_group_by_does_not_exist() {
let sql = "SELECT SUM(age) FROM person GROUP BY doesnotexist";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
format!(
"Plan(\"Invalid identifier \\\'doesnotexist\\\' for schema {}\")",
PERSON_COLUMN_NAMES
),
format!("{:?}", err)
);
}
#[test]
fn select_simple_aggregate_with_groupby_and_column_in_aggregate_does_not_exist() {
let sql = "SELECT SUM(doesnotexist) FROM person GROUP BY first_name";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
format!(
"Plan(\"Invalid identifier \\\'doesnotexist\\\' for schema {}\")",
PERSON_COLUMN_NAMES
),
format!("{:?}", err)
);
}
#[test]
fn select_interval_out_of_range() {
let sql = "SELECT INTERVAL '100000000000000000 day'";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"NotImplemented(\"Interval field value out of range: \\\"100000000000000000 day\\\"\")",
format!("{:?}", err)
);
}
#[test]
fn select_unsupported_complex_interval() {
let sql = "SELECT INTERVAL '1 year 1 day'";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"NotImplemented(\"DF does not support intervals that have both a Year/Month part as well as Days/Hours/Mins/Seconds: \\\"1 year 1 day\\\". Hint: try breaking the interval into two parts, one with Year/Month and the other with Days/Hours/Mins/Seconds - e.g. (NOW() + INTERVAL \\\'1 year\\\') + INTERVAL \\\'1 day\\\'\")",
format!("{:?}", err)
);
}
#[test]
fn select_simple_aggregate_with_groupby_and_column_is_in_aggregate_and_groupby() {
quick_test(
"SELECT MAX(first_name) FROM person GROUP BY first_name",
"Projection: #MAX(first_name)\
\n Aggregate: groupBy=[[#first_name]], aggr=[[MAX(#first_name)]]\
\n TableScan: person projection=None",
);
}
#[test]
fn select_simple_aggregate_with_groupby_cannot_use_alias() {
let sql = "SELECT state AS x, MAX(age) FROM person GROUP BY x";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
format!(
"Plan(\"Invalid identifier \\\'x\\\' for schema {}\")",
PERSON_COLUMN_NAMES
),
format!("{:?}", err)
);
}
#[test]
fn select_simple_aggregate_with_groupby_aggregate_repeated() {
let sql = "SELECT state, MIN(age), MIN(age) FROM person GROUP BY state";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"Plan(\"Projections require unique expression names but the expression \\\"#MIN(age)\\\" at position 1 and \\\"#MIN(age)\\\" at position 2 have the same name. Consider aliasing (\\\"AS\\\") one of them.\")",
format!("{:?}", err)
);
}
#[test]
fn select_simple_aggregate_with_groupby_aggregate_repeated_and_one_has_alias() {
quick_test(
"SELECT state, MIN(age), MIN(age) AS ma FROM person GROUP BY state",
"Projection: #state, #MIN(age), #MIN(age) AS ma\
\n Aggregate: groupBy=[[#state]], aggr=[[MIN(#age)]]\
\n TableScan: person projection=None",
)
}
#[test]
fn select_simple_aggregate_with_groupby_non_column_expression_unselected() {
quick_test(
"SELECT MIN(first_name) FROM person GROUP BY age + 1",
"Projection: #MIN(first_name)\
\n Aggregate: groupBy=[[#age Plus Int64(1)]], aggr=[[MIN(#first_name)]]\
\n TableScan: person projection=None",
);
}
#[test]
fn select_simple_aggregate_with_groupby_non_column_expression_selected_and_resolvable(
) {
quick_test(
"SELECT age + 1, MIN(first_name) FROM person GROUP BY age + 1",
"Projection: #age Plus Int64(1), #MIN(first_name)\
\n Aggregate: groupBy=[[#age Plus Int64(1)]], aggr=[[MIN(#first_name)]]\
\n TableScan: person projection=None",
);
quick_test(
"SELECT MIN(first_name), age + 1 FROM person GROUP BY age + 1",
"Projection: #MIN(first_name), #age Plus Int64(1)\
\n Aggregate: groupBy=[[#age Plus Int64(1)]], aggr=[[MIN(#first_name)]]\
\n TableScan: person projection=None",
);
}
#[test]
fn select_simple_aggregate_with_groupby_non_column_expression_nested_and_resolvable()
{
quick_test(
"SELECT ((age + 1) / 2) * (age + 1), MIN(first_name) FROM person GROUP BY age + 1",
"Projection: #age Plus Int64(1) Divide Int64(2) Multiply #age Plus Int64(1), #MIN(first_name)\
\n Aggregate: groupBy=[[#age Plus Int64(1)]], aggr=[[MIN(#first_name)]]\
\n TableScan: person projection=None",
);
}
#[test]
fn select_simple_aggregate_with_groupby_non_column_expression_nested_and_not_resolvable(
) {
// The query should fail, because age + 9 is not in the group by.
let sql =
"SELECT ((age + 1) / 2) * (age + 9), MIN(first_name) FROM person GROUP BY age + 1";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"Plan(\"Projection references non-aggregate values\")",
format!("{:?}", err)
);
}
#[test]
fn select_simple_aggregate_with_groupby_non_column_expression_and_its_column_selected(
) {
let sql = "SELECT age, MIN(first_name) FROM person GROUP BY age + 1";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"Plan(\"Projection references non-aggregate values\")",
format!("{:?}", err)
);
}
#[test]
fn select_simple_aggregate_nested_in_binary_expr_with_groupby() {
quick_test(
"SELECT state, MIN(age) < 10 FROM person GROUP BY state",
"Projection: #state, #MIN(age) Lt Int64(10)\
\n Aggregate: groupBy=[[#state]], aggr=[[MIN(#age)]]\
\n TableScan: person projection=None",
);
}
#[test]
fn select_simple_aggregate_and_nested_groupby_column() {
quick_test(
"SELECT age + 1, MAX(first_name) FROM person GROUP BY age",
"Projection: #age Plus Int64(1), #MAX(first_name)\
\n Aggregate: groupBy=[[#age]], aggr=[[MAX(#first_name)]]\
\n TableScan: person projection=None",
);
}
#[test]
fn select_aggregate_compounded_with_groupby_column() {
quick_test(
"SELECT age + MIN(salary) FROM person GROUP BY age",
"Projection: #age Plus #MIN(salary)\
\n Aggregate: groupBy=[[#age]], aggr=[[MIN(#salary)]]\
\n TableScan: person projection=None",
);
}
#[test]
fn select_aggregate_with_non_column_inner_expression_with_groupby() {
quick_test(
"SELECT state, MIN(age + 1) FROM person GROUP BY state",
"Projection: #state, #MIN(age Plus Int64(1))\
\n Aggregate: groupBy=[[#state]], aggr=[[MIN(#age Plus Int64(1))]]\
\n TableScan: person projection=None",
);
}
#[test]
fn test_wildcard() {
quick_test(
"SELECT * from person",
"Projection: #id, #first_name, #last_name, #age, #state, #salary, #birth_date\
\n TableScan: person projection=None",
);
}
#[test]
fn select_count_one() {
let sql = "SELECT COUNT(1) FROM person";
let expected = "Projection: #COUNT(UInt8(1))\
\n Aggregate: groupBy=[[]], aggr=[[COUNT(UInt8(1))]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_count_column() {
let sql = "SELECT COUNT(id) FROM person";
let expected = "Projection: #COUNT(id)\
\n Aggregate: groupBy=[[]], aggr=[[COUNT(#id)]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_scalar_func() {
let sql = "SELECT sqrt(age) FROM person";
let expected = "Projection: sqrt(#age)\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_aliased_scalar_func() {
let sql = "SELECT sqrt(age) AS square_people FROM person";
let expected = "Projection: sqrt(#age) AS square_people\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_where_nullif_division() {
let sql = "SELECT c3/(c4+c5) \
FROM aggregate_test_100 WHERE c3/nullif(c4+c5, 0) > 0.1";
let expected = "Projection: #c3 Divide #c4 Plus #c5\
\n Filter: #c3 Divide nullif(#c4 Plus #c5, Int64(0)) Gt Float64(0.1)\
\n TableScan: aggregate_test_100 projection=None";
quick_test(sql, expected);
}
#[test]
fn select_where_with_negative_operator() {
let sql = "SELECT c3 FROM aggregate_test_100 WHERE c3 > -0.1 AND -c4 > 0";
let expected = "Projection: #c3\
\n Filter: #c3 Gt Float64(-0.1) And (- #c4) Gt Int64(0)\
\n TableScan: aggregate_test_100 projection=None";
quick_test(sql, expected);
}
#[test]
fn select_where_with_positive_operator() {
let sql = "SELECT c3 FROM aggregate_test_100 WHERE c3 > +0.1 AND +c4 > 0";
let expected = "Projection: #c3\
\n Filter: #c3 Gt Float64(0.1) And #c4 Gt Int64(0)\
\n TableScan: aggregate_test_100 projection=None";
quick_test(sql, expected);
}
#[test]
fn select_order_by() {
let sql = "SELECT id FROM person ORDER BY id";
let expected = "Sort: #id ASC NULLS FIRST\
\n Projection: #id\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_order_by_desc() {
let sql = "SELECT id FROM person ORDER BY id DESC";
let expected = "Sort: #id DESC NULLS FIRST\
\n Projection: #id\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_order_by_nulls_last() {
quick_test(
"SELECT id FROM person ORDER BY id DESC NULLS LAST",
"Sort: #id DESC NULLS LAST\
\n Projection: #id\
\n TableScan: person projection=None",
);
quick_test(
"SELECT id FROM person ORDER BY id NULLS LAST",
"Sort: #id ASC NULLS LAST\
\n Projection: #id\
\n TableScan: person projection=None",
);
}
#[test]
fn select_group_by() {
let sql = "SELECT state FROM person GROUP BY state";
let expected = "Projection: #state\
\n Aggregate: groupBy=[[#state]], aggr=[[]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_group_by_columns_not_in_select() {
let sql = "SELECT MAX(age) FROM person GROUP BY state";
let expected = "Projection: #MAX(age)\
\n Aggregate: groupBy=[[#state]], aggr=[[MAX(#age)]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_group_by_count_star() {
let sql = "SELECT state, COUNT(*) FROM person GROUP BY state";
let expected = "Projection: #state, #COUNT(UInt8(1))\
\n Aggregate: groupBy=[[#state]], aggr=[[COUNT(UInt8(1))]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_group_by_needs_projection() {
let sql = "SELECT COUNT(state), state FROM person GROUP BY state";
let expected = "\
Projection: #COUNT(state), #state\
\n Aggregate: groupBy=[[#state]], aggr=[[COUNT(#state)]]\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
#[test]
fn select_7480_1() {
let sql = "SELECT c1, MIN(c12) FROM aggregate_test_100 GROUP BY c1, c13";
let expected = "Projection: #c1, #MIN(c12)\
\n Aggregate: groupBy=[[#c1, #c13]], aggr=[[MIN(#c12)]]\
\n TableScan: aggregate_test_100 projection=None";
quick_test(sql, expected);
}
#[test]
fn select_7480_2() {
let sql = "SELECT c1, c13, MIN(c12) FROM aggregate_test_100 GROUP BY c1";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"Plan(\"Projection references non-aggregate values\")",
format!("{:?}", err)
);
}
#[test]
fn create_external_table_csv() {
let sql = "CREATE EXTERNAL TABLE t(c1 int) STORED AS CSV LOCATION 'foo.csv'";
let expected = "CreateExternalTable: \"t\"";
quick_test(sql, expected);
}
#[test]
fn create_external_table_csv_no_schema() {
let sql = "CREATE EXTERNAL TABLE t STORED AS CSV LOCATION 'foo.csv'";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"Plan(\"Column definitions required for CSV files. None found\")",
format!("{:?}", err)
);
}
#[test]
fn create_external_table_parquet() {
let sql =
"CREATE EXTERNAL TABLE t(c1 int) STORED AS PARQUET LOCATION 'foo.parquet'";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"Plan(\"Column definitions can not be specified for PARQUET files.\")",
format!("{:?}", err)
);
}
#[test]
fn create_external_table_parquet_no_schema() {
let sql = "CREATE EXTERNAL TABLE t STORED AS PARQUET LOCATION 'foo.parquet'";
let expected = "CreateExternalTable: \"t\"";
quick_test(sql, expected);
}
#[test]
fn equijoin_explicit_syntax() {
let sql = "SELECT id, order_id \
FROM person \
JOIN orders \
ON id = customer_id";
let expected = "Projection: #id, #order_id\
\n Join: id = customer_id\
\n TableScan: person projection=None\
\n TableScan: orders projection=None";
quick_test(sql, expected);
}
#[test]
fn equijoin_explicit_syntax_3_tables() {
let sql = "SELECT id, order_id, l_description \
FROM person \
JOIN orders ON id = customer_id \
JOIN lineitem ON o_item_id = l_item_id";
let expected = "Projection: #id, #order_id, #l_description\
\n Join: o_item_id = l_item_id\
\n Join: id = customer_id\
\n TableScan: person projection=None\
\n TableScan: orders projection=None\
\n TableScan: lineitem projection=None";
quick_test(sql, expected);
}
#[test]
fn boolean_literal_in_condition_expression() {
let sql = "SELECT order_id \
FROM orders \
WHERE delivered = false OR delivered = true";
let expected = "Projection: #order_id\
\n Filter: #delivered Eq Boolean(false) Or #delivered Eq Boolean(true)\
\n TableScan: orders projection=None";
quick_test(sql, expected);
}
#[test]
fn union() {
let sql = "SELECT order_id from orders UNION ALL SELECT order_id FROM orders";
let expected = "Union\
\n Projection: #order_id\
\n TableScan: orders projection=None\
\n Projection: #order_id\
\n TableScan: orders projection=None";
quick_test(sql, expected);
}
#[test]
fn union_4_combined_in_one() {
let sql = "SELECT order_id from orders
UNION ALL SELECT order_id FROM orders
UNION ALL SELECT order_id FROM orders
UNION ALL SELECT order_id FROM orders";
let expected = "Union\
\n Projection: #order_id\
\n TableScan: orders projection=None\
\n Projection: #order_id\
\n TableScan: orders projection=None\
\n Projection: #order_id\
\n TableScan: orders projection=None\
\n Projection: #order_id\
\n TableScan: orders projection=None";
quick_test(sql, expected);
}
#[test]
fn union_schemas_should_be_same() {
let sql = "SELECT order_id from orders UNION ALL SELECT customer_id FROM orders";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"Plan(\"UNION ALL schemas are expected to be the same\")",
format!("{:?}", err)
);
}
#[test]
fn only_union_all_supported() {
let sql = "SELECT order_id from orders EXCEPT SELECT order_id FROM orders";
let err = logical_plan(sql).expect_err("query should have failed");
assert_eq!(
"NotImplemented(\"Only UNION ALL is supported, found EXCEPT\")",
format!("{:?}", err)
);
}
#[test]
fn select_typedstring() {
let sql = "SELECT date '2020-12-10' AS date FROM person";
let expected = "Projection: CAST(Utf8(\"2020-12-10\") AS Date32) AS date\
\n TableScan: person projection=None";
quick_test(sql, expected);
}
fn logical_plan(sql: &str) -> Result<LogicalPlan> {
let planner = SqlToRel::new(&MockContextProvider {});
let result = DFParser::parse_sql(&sql);
let ast = result.unwrap();
planner.statement_to_plan(&ast[0])
}
/// Create logical plan, write with formatter, compare to expected output
fn quick_test(sql: &str, expected: &str) {
let plan = logical_plan(sql).unwrap();
assert_eq!(expected, format!("{:?}", plan));
}
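/// Test-only [ContextProvider] serving fixed schemas (backed by [EmptyTable])
/// for the tables referenced by the SQL in these planner tests.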
struct MockContextProvider {}
impl ContextProvider for MockContextProvider {
fn get_table_provider(
&self,
name: TableReference,
) -> Option<Arc<dyn TableProvider>> {
let schema = match name.table() {
"person" => Some(Schema::new(vec![
Field::new("id", DataType::UInt32, false),
Field::new("first_name", DataType::Utf8, false),
Field::new("last_name", DataType::Utf8, false),
Field::new("age", DataType::Int32, false),
Field::new("state", DataType::Utf8, false),
Field::new("salary", DataType::Float64, false),
Field::new(
"birth_date",
DataType::Timestamp(TimeUnit::Nanosecond, None),
false,
),
])),
"orders" => Some(Schema::new(vec![
Field::new("order_id", DataType::UInt32, false),
Field::new("customer_id", DataType::UInt32, false),
Field::new("o_item_id", DataType::Utf8, false),
Field::new("qty", DataType::Int32, false),
Field::new("price", DataType::Float64, false),
Field::new("delivered", DataType::Boolean, false),
])),
"lineitem" => Some(Schema::new(vec![
Field::new("l_item_id", DataType::UInt32, false),
Field::new("l_description", DataType::Utf8, false),
])),
"aggregate_test_100" => Some(Schema::new(vec![
Field::new("c1", DataType::Utf8, false),
Field::new("c2", DataType::UInt32, false),
Field::new("c3", DataType::Int8, false),
Field::new("c4", DataType::Int16, false),
Field::new("c5", DataType::Int32, false),
Field::new("c6", DataType::Int64, false),
Field::new("c7", DataType::UInt8, false),
Field::new("c8", DataType::UInt16, false),
Field::new("c9", DataType::UInt32, false),
Field::new("c10", DataType::UInt64, false),
Field::new("c11", DataType::Float32, false),
Field::new("c12", DataType::Float64, false),
Field::new("c13", DataType::Utf8, false),
])),
_ => None,
};
schema.map(|s| -> Arc<dyn TableProvider> {
Arc::new(EmptyTable::new(Arc::new(s)))
})
}
fn get_function_meta(&self, name: &str) -> Option<Arc<ScalarUDF>> {
let f: ScalarFunctionImplementation =
Arc::new(|_| Err(DataFusionError::NotImplemented("".to_string())));
match name {
"my_sqrt" => Some(Arc::new(create_udf(
"my_sqrt",
vec![DataType::Float64],
Arc::new(DataType::Float64),
f,
))),
_ => None,
}
}
fn get_aggregate_meta(&self, _name: &str) -> Option<Arc<AggregateUDF>> {
unimplemented!()
}
}
}
| 38.35962 | 332 | 0.511626 |
fef7c2d3511702d7ba3926d3a53d7050d09ad174 | 7,013 | use std::env;
use std::fs;
use std::convert::AsRef;
use std::io::Read;
use std::path::{Path, PathBuf};
use std::collections::HashMap;
use url::Url;
use errors::Result;
use types::{SourceMap, RawToken, Token};
/// Helper for sourcemap generation
///
/// This helper exists because generating and modifying `SourceMap`
/// objects directly is not very ergonomic; this builder provides a more
/// convenient interface for assembling one incrementally.
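///
/// A minimal usage sketch (marked `ignore` since it needs the full crate):
///
/// ```ignore
/// let mut builder = SourceMapBuilder::new(Some("out.js"));
/// // map generated position (0, 0) back to (0, 0) in "input.js"
/// builder.add(0, 0, 0, 0, Some("input.js"), None);
/// let sm = builder.into_sourcemap();
/// ```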
pub struct SourceMapBuilder {
file: Option<String>,
name_map: HashMap<String, u32>,
names: Vec<String>,
tokens: Vec<RawToken>,
source_map: HashMap<String, u32>,
sources: Vec<String>,
source_contents: Vec<Option<String>>,
}
fn resolve_local_reference(base: &Url, reference: &str) -> Option<PathBuf> {
let url = match base.join(reference) {
Ok(url) => {
if url.scheme() != "file" {
return None;
}
url
}
Err(_) => {
return None;
}
};
url.to_file_path().ok()
}
impl SourceMapBuilder {
/// Creates a new source map builder and sets the file.
pub fn new(file: Option<&str>) -> SourceMapBuilder {
SourceMapBuilder {
file: file.map(|x| x.to_string()),
name_map: HashMap::new(),
names: vec![],
tokens: vec![],
source_map: HashMap::new(),
sources: vec![],
source_contents: vec![],
}
}
/// Sets the file for the sourcemap (optional)
pub fn set_file(&mut self, value: Option<&str>) {
self.file = value.map(|x| x.to_string());
}
/// Returns the currently set file.
pub fn get_file(&self) -> Option<&str> {
self.file.as_ref().map(|x| &x[..])
}
/// Registers a new source with the builder and returns the source ID.
pub fn add_source(&mut self, src: &str) -> u32 {
let count = self.sources.len() as u32;
let id = *self.source_map.entry(src.into()).or_insert(count);
if id == count {
self.sources.push(src.into());
}
id
}
/// Changes the source name for an already set source.
pub fn set_source(&mut self, src_id: u32, src: &str) {
assert!(src_id != !0, "Cannot set sources for tombstone source id");
self.sources[src_id as usize] = src.to_string();
}
/// Looks up a source name for an ID.
pub fn get_source(&self, src_id: u32) -> Option<&str> {
self.sources.get(src_id as usize).map(|x| &x[..])
}
/// Sets the source contents for an already existing source.
pub fn set_source_contents(&mut self, src_id: u32, contents: Option<&str>) {
        assert!(src_id != !0, "Cannot set source contents for tombstone source id");
if self.sources.len() > self.source_contents.len() {
self.source_contents.resize(self.sources.len(), None);
}
self.source_contents[src_id as usize] = contents.map(|x| x.to_string());
}
/// Returns the current source contents for a source.
pub fn get_source_contents(&self, src_id: u32) -> Option<&str> {
self.source_contents.get(src_id as usize).and_then(|x| x.as_ref().map(|x| &x[..]))
}
/// Checks if a given source ID has source contents available.
pub fn has_source_contents(&self, src_id: u32) -> bool {
self.get_source_contents(src_id).is_some()
}
    /// Loads source contents from locally accessible files whose source
    /// references resolve (against `base_path`) to local `file://` URLs.
    /// Returns the number of source contents loaded.
pub fn load_local_source_contents(&mut self, base_path: Option<&Path>) -> Result<usize> {
let mut abs_path = env::current_dir()?;
if let Some(path) = base_path {
abs_path.push(path);
}
let base_url = Url::from_directory_path(&abs_path).unwrap();
let mut to_read = vec![];
for (source, &src_id) in self.source_map.iter() {
if self.has_source_contents(src_id) {
continue;
}
if let Some(path) = resolve_local_reference(&base_url, &source) {
to_read.push((src_id, path));
}
}
let rv = to_read.len();
for (src_id, path) in to_read {
if let Ok(mut f) = fs::File::open(&path) {
let mut contents = String::new();
if f.read_to_string(&mut contents).is_ok() {
self.set_source_contents(src_id, Some(&contents));
}
}
}
Ok(rv)
}
/// Registers a name with the builder and returns the name ID.
pub fn add_name(&mut self, name: &str) -> u32 {
let count = self.names.len() as u32;
let id = *self.name_map.entry(name.into()).or_insert(count);
if id == count {
self.names.push(name.into());
}
id
}
/// Adds a new mapping to the builder.
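    /// A missing `source`/`name` is recorded with the tombstone ID `!0`.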
pub fn add(&mut self,
dst_line: u32,
dst_col: u32,
src_line: u32,
src_col: u32,
source: Option<&str>,
name: Option<&str>)
-> RawToken {
let src_id = match source {
Some(source) => self.add_source(source),
None => !0,
};
let name_id = match name {
Some(name) => self.add_name(name),
None => !0,
};
let raw = RawToken {
dst_line: dst_line,
dst_col: dst_col,
src_line: src_line,
src_col: src_col,
src_id: src_id,
name_id: name_id,
};
self.tokens.push(raw);
raw
}
/// Shortcut for adding a new mapping based of an already existing token,
/// optionally removing the name.
pub fn add_token(&mut self, token: &Token, with_name: bool) -> RawToken {
let name = if with_name { token.get_name() } else { None };
self.add(token.get_dst_line(),
token.get_dst_col(),
token.get_src_line(),
token.get_src_col(),
token.get_source(),
name)
}
/// Strips common prefixes from the sources in the builder
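    /// For example, stripping the prefix `"/app"` (a trailing `/` is implied)
    /// turns the source `"/app/src/a.js"` into `"src/a.js"`.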
pub fn strip_prefixes<S: AsRef<str>>(&mut self, prefixes: &[S]) {
for source in self.sources.iter_mut() {
for prefix in prefixes {
let mut prefix = prefix.as_ref().to_string();
if !prefix.ends_with('/') {
prefix.push('/');
}
if source.starts_with(&prefix) {
*source = source[prefix.len()..].to_string();
break;
}
}
}
}
/// Converts the builder into a sourcemap.
pub fn into_sourcemap(self) -> SourceMap {
        let contents = if !self.source_contents.is_empty() {
Some(self.source_contents)
} else {
None
};
SourceMap::new(self.file, self.tokens, self.names, self.sources, contents)
}
}
| 32.169725 | 93 | 0.54784 |
2faa30a0a0c2c023fe5cf78ae906761bce96b56b | 7,039 | //! Ergo transaction
use super::{
data_input::DataInput, digest32::Digest32, ergo_box::ErgoBoxCandidate, input::Input,
token::TokenId,
};
use indexmap::IndexSet;
#[cfg(test)]
use proptest_derive::Arbitrary;
#[cfg(feature = "with-serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use sigma_ser::serializer::SerializationError;
use sigma_ser::serializer::SigmaSerializable;
use sigma_ser::vlq_encode;
use std::convert::TryFrom;
use std::io;
use std::iter::FromIterator;
/// Transaction id (ModifierId in sigmastate)
#[derive(PartialEq, Eq, Hash, Debug, Clone)]
#[cfg_attr(test, derive(Arbitrary))]
#[cfg_attr(feature = "with-serde", derive(Serialize, Deserialize))]
pub struct TxId(pub Digest32);
impl SigmaSerializable for TxId {
fn sigma_serialize<W: vlq_encode::WriteSigmaVlqExt>(&self, w: &mut W) -> Result<(), io::Error> {
self.0.sigma_serialize(w)?;
Ok(())
}
fn sigma_parse<R: vlq_encode::ReadSigmaVlqExt>(r: &mut R) -> Result<Self, SerializationError> {
Ok(Self(Digest32::sigma_parse(r)?))
}
}
/**
* ErgoTransaction is an atomic state transition operation. It destroys Boxes from the state
 * and creates new ones. If a transaction is spending boxes protected by some non-trivial scripts,
 * its inputs should also contain proofs of spending correctness: a context extension (user-defined
 * key-value map) and data inputs (links to existing boxes in the state) that may be used during
 * script reduction to crypto, plus signatures that satisfy the remaining cryptographic protection
 * of the script.
* Transactions are not encrypted, so it is possible to browse and view every transaction ever
* collected into a block.
*/
#[derive(PartialEq, Debug)]
pub struct Transaction {
/// inputs, that will be spent by this transaction.
pub inputs: Vec<Input>,
/// inputs, that are not going to be spent by transaction, but will be reachable from inputs
/// scripts. `dataInputs` scripts will not be executed, thus their scripts costs are not
/// included in transaction cost and they do not contain spending proofs.
pub data_inputs: Vec<DataInput>,
/// box candidates to be created by this transaction. Differ from ordinary ones in that
/// they do not include transaction id and index
pub output_candidates: Vec<ErgoBoxCandidate>,
}
impl SigmaSerializable for Transaction {
fn sigma_serialize<W: vlq_encode::WriteSigmaVlqExt>(&self, w: &mut W) -> Result<(), io::Error> {
// reference implementation - https://github.com/ScorexFoundation/sigmastate-interpreter/blob/9b20cb110effd1987ff76699d637174a4b2fb441/sigmastate/src/main/scala/org/ergoplatform/ErgoLikeTransaction.scala#L112-L112
w.put_usize_as_u16(self.inputs.len())?;
self.inputs.iter().try_for_each(|i| i.sigma_serialize(w))?;
w.put_usize_as_u16(self.data_inputs.len())?;
self.data_inputs
.iter()
.try_for_each(|i| i.sigma_serialize(w))?;
// Serialize distinct ids of tokens in transaction outputs.
// This optimization is crucial to allow up to MaxTokens (== 255) in a box.
        // Without it, the total size of all token ids would be 255 * 32 = 8160 bytes, way beyond MaxBoxSize (== 4K).
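        // E.g. two outputs both carrying token T produce token_ids = [T, T],
        // but distinct_token_ids = {T}: T's 32-byte id is written once and the
        // boxes below are serialized against the indexed digest set instead.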
let token_ids: Vec<TokenId> = self
.output_candidates
.iter()
.flat_map(|b| b.tokens.iter().map(|t| t.token_id.clone()))
.collect();
let distinct_token_ids: IndexSet<TokenId> = IndexSet::from_iter(token_ids);
w.put_u32(u32::try_from(distinct_token_ids.len()).unwrap())?;
distinct_token_ids
.iter()
.try_for_each(|t_id| t_id.sigma_serialize(w))?;
// serialize outputs
w.put_usize_as_u16(self.output_candidates.len())?;
self.output_candidates.iter().try_for_each(|o| {
ErgoBoxCandidate::serialize_body_with_indexed_digests(o, Some(&distinct_token_ids), w)
})?;
Ok(())
}
fn sigma_parse<R: vlq_encode::ReadSigmaVlqExt>(r: &mut R) -> Result<Self, SerializationError> {
// reference implementation - https://github.com/ScorexFoundation/sigmastate-interpreter/blob/9b20cb110effd1987ff76699d637174a4b2fb441/sigmastate/src/main/scala/org/ergoplatform/ErgoLikeTransaction.scala#L146-L146
// parse transaction inputs
let inputs_count = r.get_u16()?;
let mut inputs = Vec::with_capacity(inputs_count as usize);
for _ in 0..inputs_count {
inputs.push(Input::sigma_parse(r)?);
}
// parse transaction data inputs
let data_inputs_count = r.get_u16()?;
let mut data_inputs = Vec::with_capacity(data_inputs_count as usize);
for _ in 0..data_inputs_count {
data_inputs.push(DataInput::sigma_parse(r)?);
}
// parse distinct ids of tokens in transaction outputs
let tokens_count = r.get_u32()?;
let mut token_ids = IndexSet::with_capacity(tokens_count as usize);
for _ in 0..tokens_count {
token_ids.insert(TokenId::sigma_parse(r)?);
}
// parse outputs
let outputs_count = r.get_u16()?;
let mut outputs = Vec::with_capacity(outputs_count as usize);
for _ in 0..outputs_count {
outputs.push(ErgoBoxCandidate::parse_body_with_indexed_digests(
Some(&token_ids),
r,
)?)
}
Ok(Transaction {
inputs,
data_inputs,
output_candidates: outputs,
})
}
}
#[cfg(feature = "with-serde")]
impl serde::Serialize for Transaction {
fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
        // not implemented
s.serialize_str("TBD")
}
}
#[cfg(feature = "with-serde")]
impl<'de> serde::Deserialize<'de> for Transaction {
fn deserialize<D>(_: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
todo!()
}
}
#[cfg(test)]
mod tests {
use super::*;
use sigma_ser::test_helpers::*;
use proptest::{arbitrary::Arbitrary, collection::vec, prelude::*};
impl Arbitrary for Transaction {
type Parameters = ();
fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
(
vec(any::<Input>(), 1..10),
vec(any::<DataInput>(), 0..10),
vec(any::<ErgoBoxCandidate>(), 1..10),
)
.prop_map(|(inputs, data_inputs, outputs)| Self {
inputs,
data_inputs,
output_candidates: outputs,
})
.boxed()
}
type Strategy = BoxedStrategy<Self>;
}
proptest! {
#[test]
fn tx_ser_roundtrip(v in any::<Transaction>()) {
prop_assert_eq![sigma_serialize_roundtrip(&v), v];
}
#[test]
fn tx_id_ser_roundtrip(v in any::<TxId>()) {
prop_assert_eq![sigma_serialize_roundtrip(&v), v];
}
}
}
| 36.283505 | 221 | 0.637306 |
875a3ee9a6bf5b9a9fe5e884fd18237543489bde | 2,390 | use std::{env, process::exit};
use url::Url;
use matrix_sdk::{
self,
events::{
room::message::{MessageEventContent, TextMessageEventContent},
SyncMessageEvent,
},
Client, ClientConfig, EventEmitter, SyncRoom, SyncSettings,
};
use matrix_sdk_common_macros::async_trait;
struct EventCallback;
#[async_trait]
impl EventEmitter for EventCallback {
async fn on_room_message(&self, room: SyncRoom, event: &SyncMessageEvent<MessageEventContent>) {
if let SyncRoom::Joined(room) = room {
if let SyncMessageEvent {
content: MessageEventContent::Text(TextMessageEventContent { body: msg_body, .. }),
sender,
..
} = event
{
let name = {
                    // read locks should be held for the shortest time
                    // possible to avoid deadlocks
let room = room.read().await;
let member = room.joined_members.get(&sender).unwrap();
member.name()
};
println!("{}: {}", name, msg_body);
}
}
}
}
async fn login(
homeserver_url: String,
username: &str,
password: &str,
) -> Result<(), matrix_sdk::Error> {
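    // The client is pointed at a local HTTP proxy with TLS verification
    // disabled, presumably so traffic can be inspected during development.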
let client_config = ClientConfig::new()
.proxy("http://localhost:8080")?
.disable_ssl_verification();
let homeserver_url = Url::parse(&homeserver_url).expect("Couldn't parse the homeserver URL");
let mut client = Client::new_with_config(homeserver_url, client_config).unwrap();
client.add_event_emitter(Box::new(EventCallback)).await;
client
.login(username, password, None, Some("rust-sdk"))
.await?;
client.sync_forever(SyncSettings::new(), |_| async {}).await;
Ok(())
}
#[tokio::main]
async fn main() -> Result<(), matrix_sdk::Error> {
tracing_subscriber::fmt::init();
let (homeserver_url, username, password) =
match (env::args().nth(1), env::args().nth(2), env::args().nth(3)) {
(Some(a), Some(b), Some(c)) => (a, b, c),
_ => {
eprintln!(
"Usage: {} <homeserver_url> <username> <password>",
env::args().next().unwrap()
);
exit(1)
}
};
login(homeserver_url, &username, &password).await
}
| 30.641026 | 100 | 0.554393 |
c18fecd7c569e09e469cee74d3c5dd981130e285 | 656 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-pretty issue #37195
// Testing that a plain .rs file can load modules from other source files
mod mod_file_aux;
pub fn main() {
assert_eq!(mod_file_aux::foo(), 10);
}
| 32.8 | 73 | 0.733232 |
9c2adb79097ca20e79ef975b650e84ed7b564862 | 5,934 | extern crate alloc;
use crate::result::*;
use crate::svc;
use crate::util;
use crate::mem;
use core::ptr;
pub type ThreadName = [u8; 0x20];
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(u8)]
pub enum ThreadState {
NotInitialized = 0,
Initialized = 1,
DestroyedBeforeStarted = 2,
Started = 3,
Terminated = 4
}
extern fn thread_entry_impl(thread_arg: *mut u8) -> ! {
let thread_ref = thread_arg as *mut Thread;
set_current_thread(thread_ref);
unsafe {
if let Some(entry) = (*thread_ref).entry {
let entry_arg = (*thread_ref).entry_arg;
(entry)(entry_arg);
}
}
svc::exit_thread();
}
pub const INVALID_PRIORITY: i32 = -1;
#[repr(C)]
pub struct Thread {
pub self_ref: *mut Thread,
pub state: ThreadState,
pub owns_stack: bool,
pub pad: [u8; 2],
pub handle: svc::Handle,
pub stack: *mut u8,
pub stack_size: usize,
pub entry: Option<fn(*mut u8)>,
pub entry_arg: *mut u8,
pub tls_slots: [*mut u8; 0x20],
pub reserved: [u8; 0x54],
pub name_len: u32,
pub name: ThreadName,
pub name_addr: *mut u8,
pub reserved_2: [u8; 0x20],
}
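// Usage sketch (hypothetical entry fn; priority/cpu_id values depend on the
// target system):
//
//     fn entry(_arg: *mut u8) { /* thread body */ }
//     let mut thread = Thread::new(entry, ptr::null_mut(), ptr::null_mut(), 0x4000, "my.Thread")?;
//     thread.create_and_start(INVALID_PRIORITY, -2)?;
//     thread.join()?;
//
// Passing a null stack makes `new` heap-allocate one of `stack_size` bytes,
// and INVALID_PRIORITY inherits the calling thread's priority (see `create`).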
impl Thread {
pub const fn empty() -> Self {
Self {
self_ref: ptr::null_mut(),
state: ThreadState::NotInitialized,
owns_stack: false,
pad: [0; 2],
handle: 0,
stack: ptr::null_mut(),
stack_size: 0,
entry: None,
entry_arg: ptr::null_mut(),
tls_slots: [ptr::null_mut(); 0x20],
reserved: [0; 0x54],
name_len: 0,
name: [0; 0x20],
name_addr: ptr::null_mut(),
reserved_2: [0; 0x20],
}
}
pub fn existing(handle: svc::Handle, name: &str, stack: *mut u8, stack_size: usize, owns_stack: bool, entry: Option<fn(*mut u8)>, entry_arg: *mut u8) -> Result<Self> {
let mut thread = Self {
self_ref: ptr::null_mut(),
state: ThreadState::Started,
owns_stack: owns_stack,
pad: [0; 2],
handle: handle,
stack: stack,
stack_size: stack_size,
entry: entry,
entry_arg: entry_arg,
tls_slots: [ptr::null_mut(); 0x20],
reserved: [0; 0x54],
name_len: 0,
name: [0; 0x20],
name_addr: ptr::null_mut(),
reserved_2: [0; 0x20],
};
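        // These self-referential pointers dangle once `thread` is moved out of
        // this function; `set_current_thread` re-points them before use.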
thread.self_ref = &mut thread;
thread.name_addr = &mut thread.name as *mut ThreadName as *mut u8;
thread.set_name(name)?;
Ok(thread)
}
pub fn new(entry: fn(*mut u8), entry_arg: *mut u8, stack: *mut u8, stack_size: usize, name: &str) -> Result<Self> {
let mut stack_value = stack;
let mut owns_stack = false;
if stack_value.is_null() {
unsafe {
let stack_layout = alloc::alloc::Layout::from_size_align_unchecked(stack_size, mem::PAGE_ALIGNMENT);
stack_value = alloc::alloc::alloc(stack_layout);
owns_stack = true;
}
}
Self::existing(0, name, stack_value, stack_size, owns_stack, Some(entry), entry_arg)
}
pub fn create(&mut self, priority: i32, cpu_id: i32) -> Result<()> {
let mut priority_value = priority;
if priority_value == INVALID_PRIORITY {
priority_value = get_current_thread().get_priority()?;
}
self.handle = svc::create_thread(thread_entry_impl, self as *mut _ as *mut u8, (self.stack as usize + self.stack_size) as *const u8, priority_value, cpu_id)?;
Ok(())
}
pub fn set_name(&mut self, name: &str) -> Result<()> {
util::copy_str_to_pointer(name, self.name_addr)
}
pub fn get_name(&mut self) -> Result<&'static str> {
util::get_str_from_pointer(&mut self.name as *mut _ as *mut u8, self.name.len())
}
pub fn get_handle(&self) -> svc::Handle {
self.handle
}
pub fn get_priority(&self) -> Result<i32> {
svc::get_thread_priority(self.handle)
}
pub fn get_id(&self) -> Result<u64> {
svc::get_thread_id(self.handle)
}
pub fn start(&self) -> Result<()> {
svc::start_thread(self.handle)
}
pub fn create_and_start(&mut self, priority: i32, cpu_id: i32) -> Result<()> {
self.create(priority, cpu_id)?;
self.start()
}
pub fn join(&self) -> Result<()> {
svc::wait_synchronization(&self.handle, 1, -1)?;
Ok(())
}
}
impl Drop for Thread {
fn drop(&mut self) {
if self.owns_stack {
unsafe {
let stack_layout = alloc::alloc::Layout::from_size_align_unchecked(self.stack_size, mem::PAGE_ALIGNMENT);
alloc::alloc::dealloc(self.stack, stack_layout);
}
}
        // If a thread wasn't created by us (like the main thread), the entry field will be None (see Thread::empty), and we want to avoid closing handles for threads we did not create :P
if self.entry.is_some() {
let _ = svc::close_handle(self.handle);
}
}
}
#[derive(Copy, Clone)]
#[repr(C)]
pub struct Tls {
pub ipc_buffer: [u8; 0x100],
pub preemption_state: u32,
pub unk: [u8; 0xF4],
pub thread_ref: *mut Thread,
}
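// Reads the current thread's TLS block pointer from aarch64's read-only
// thread-pointer register (`tpidrro_el0`).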
pub fn get_thread_local_storage() -> *mut Tls {
let tls: *mut Tls;
unsafe {
llvm_asm!("mrs x0, tpidrro_el0" : "={x0}"(tls) ::: "volatile");
}
tls
}
pub fn set_current_thread(thread_ref: *mut Thread) {
unsafe {
(*thread_ref).self_ref = thread_ref;
(*thread_ref).name_addr = &mut (*thread_ref).name as *mut _ as *mut u8;
let tls = get_thread_local_storage();
(*tls).thread_ref = thread_ref;
}
}
pub fn get_current_thread() -> &'static mut Thread {
unsafe {
let tls = get_thread_local_storage();
&mut *(*tls).thread_ref
}
} | 28.528846 | 171 | 0.567914 |
16f9e14888c39bc570a749269b09bb1d1c26aea7 | 1,750 | #![cfg(target_os = "windows")]
use winapi;
use winapi::shared::windef::HWND;
pub use self::events_loop::{EventsLoop, EventsLoopProxy};
pub use self::monitor::MonitorId;
pub use self::window::Window;
#[derive(Clone, Default)]
pub struct PlatformSpecificWindowBuilderAttributes {
pub parent: Option<HWND>,
pub taskbar_icon: Option<::Icon>,
pub no_redirection_bitmap: bool,
}
unsafe impl Send for PlatformSpecificWindowBuilderAttributes {}
unsafe impl Sync for PlatformSpecificWindowBuilderAttributes {}
// Cursor name in UTF-16. Used to set cursor in `WM_SETCURSOR`.
#[derive(Debug, Clone, Copy)]
pub struct Cursor(pub *const winapi::ctypes::wchar_t);
unsafe impl Send for Cursor {}
unsafe impl Sync for Cursor {}
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct DeviceId(u32);
impl DeviceId {
pub unsafe fn dummy() -> Self {
DeviceId(0)
}
}
impl DeviceId {
pub fn get_persistent_identifier(&self) -> Option<String> {
if self.0 != 0 {
raw_input::get_raw_input_device_name(self.0 as _)
} else {
None
}
}
}
// Constant device ID, to be removed when this backend is updated to report real device IDs.
const DEVICE_ID: ::DeviceId = ::DeviceId(DeviceId(0));
fn wrap_device_id(id: u32) -> ::DeviceId {
::DeviceId(DeviceId(id))
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct WindowId(HWND);
unsafe impl Send for WindowId {}
unsafe impl Sync for WindowId {}
impl WindowId {
pub unsafe fn dummy() -> Self {
use std::ptr::null_mut;
WindowId(null_mut())
}
}
mod dpi;
mod drop_handler;
mod event;
mod events_loop;
mod icon;
mod monitor;
mod raw_input;
mod util;
mod window;
| 23.648649 | 92 | 0.689143 |
ddb092331132d8deae678c11d242fde5517c29ce | 7,964 | use anyhow::Context;
use openat;
use openat_ext::*;
use std::fs::File;
use std::io::prelude::*;
use std::{error, result};
use tempfile;
type Result<T> = result::Result<T, Box<dyn error::Error>>;
#[test]
fn open_file_optional() -> Result<()> {
let td = tempfile::tempdir()?;
let d = openat::Dir::open(td.path())?;
assert!(d.open_file_optional("bar")?.is_none());
d.write_file("bar", 0o644)?.sync_all()?;
assert!(d.open_file_optional("bar")?.is_some());
Ok(())
}
#[test]
fn dir_tests() -> Result<()> {
let td = tempfile::tempdir()?;
let d = openat::Dir::open(td.path())?;
assert!(d.sub_dir_optional("bar")?.is_none());
assert!(!d.exists("bar")?);
d.create_dir("bar", 0o755)?;
assert!(d.exists("bar")?);
d.ensure_dir("bar", 0o755)?;
let bar = d.sub_dir_optional("bar")?.expect("bar");
assert_eq!(0, bar.list_dir(".")?.count());
assert!(!d.exists("baz")?);
assert!(!d.remove_dir_optional("baz")?);
d.ensure_dir("baz", 0o755)?;
assert!(d.remove_dir_optional("baz")?);
assert!(!d.exists("baz")?);
Ok(())
}
#[test]
fn exists() -> Result<()> {
let td = tempfile::tempdir()?;
let d = openat::Dir::open(td.path())?;
assert!(!d.exists("l")?);
d.symlink("l", "enoent")?;
assert!(d.exists("l")?);
Ok(())
}
#[test]
fn copy() -> Result<()> {
let td = tempfile::tempdir()?;
let src_p = td.path().join("testfile");
let dest_p = td.path().join("testfiledest");
let contents = "somefilecontents";
std::fs::write(&src_p, contents)?;
let src = File::open(&src_p)?;
{
let dest = File::create(&dest_p)?;
src.copy_to(&dest)?;
}
let testf_contents = std::fs::read_to_string(&dest_p)?;
assert_eq!(contents, testf_contents.as_str());
Ok(())
}
#[test]
fn write_file_with() -> Result<()> {
let td = tempfile::tempdir()?;
let d = openat::Dir::open(td.path())?;
let testname = "testfile";
let testcontents = "hello world";
d.write_file_with("testfile", 0o644, |w| -> std::io::Result<()> {
w.write_all(testcontents.as_bytes())
})?;
let actual_contents = std::fs::read_to_string(td.path().join(testname))?;
assert_eq!(testcontents, actual_contents.as_str());
let testcontents2 = "updated world";
d.write_file_with("testfile", 0o644, |w| -> anyhow::Result<()> {
w.write_all(testcontents2.as_bytes())?;
Ok(())
})?;
let actual_contents = std::fs::read_to_string(td.path().join(testname))?;
assert_eq!(testcontents2, actual_contents.as_str());
Ok(())
}
#[test]
fn write_file_with_complex() -> Result<()> {
let td = tempfile::tempdir()?;
let d = openat::Dir::open(td.path())?;
let testname = "testfile";
let testcontents = "hello world";
d.write_file_with("testfile", 0o644, |w| -> std::io::Result<()> {
w.write_all(testcontents.as_bytes())
})?;
let actual_contents = std::fs::read_to_string(td.path().join(testname))?;
assert_eq!(testcontents, actual_contents.as_str());
Ok(())
}
#[test]
fn write_file_contents() -> Result<()> {
let td = tempfile::tempdir()?;
let d = openat::Dir::open(td.path())?;
let testname = "testfile";
let testcontents = "hello world";
d.write_file_contents("testfile", 0o644, testcontents)?;
let actual_contents = std::fs::read_to_string(td.path().join(testname))?;
assert_eq!(testcontents, actual_contents.as_str());
Ok(())
}
#[test]
fn file_writer() -> Result<()> {
let td = tempfile::tempdir()?;
let d = openat::Dir::open(td.path())?;
let testname = "testfile";
let testcontents = "hello world";
let mut fw = d.new_file_writer(testname, 0o644)?;
fw.writer.write_all(testcontents.as_bytes())?;
fw.complete()?;
let actual_contents = std::fs::read_to_string(td.path().join(testname))?;
assert_eq!(testcontents, actual_contents.as_str());
Ok(())
}
#[test]
fn file_writer_abandon() -> Result<()> {
let td = tempfile::tempdir()?;
let d = openat::Dir::open(td.path())?;
let testname = "testfile";
let testcontents = "hello world";
{
let mut fw = d.new_file_writer(testname, 0o644)?;
fw.writer.write_all(testcontents.as_bytes())?;
fw.abandon();
}
assert!(d.open_file_optional(testname)?.is_none());
Ok(())
}
#[test]
fn file_writer_panic() -> Result<()> {
let td = tempfile::tempdir()?;
let d = openat::Dir::open(td.path())?;
let result = std::panic::catch_unwind(move || -> std::io::Result<()> {
let _fw = d
.new_file_writer("sometestfile", 0o644)
.expect("new writer");
Ok(())
});
match result {
Ok(_) => panic!("expected panic from FileWriter"),
Err(e) => {
if let Some(s) = e.downcast_ref::<String>() {
assert!(s.contains("FileWriter must be explicitly"));
} else {
panic!("Unexpected panic")
}
}
}
Ok(())
}
#[test]
fn rmrf() -> anyhow::Result<()> {
use std::fs::create_dir_all;
use std::fs::write as fswrite;
use std::os::unix::fs::symlink;
let tempdir = tempfile::tempdir()?;
let d = openat::Dir::open(tempdir.path())?;
let td = tempdir.path().join("t");
for d in &["foo/bar/baz", "foo/bar/blah", "blah/moo", "somedir"] {
let p = td.join(d);
create_dir_all(&p)?;
symlink("/", p.join("somelink"))?;
symlink("somelink", p.join("otherlink"))?;
symlink(".", p.join("link2self"))?;
let linkeddir = p.join("linkdirtarget");
create_dir_all(&linkeddir)?;
symlink(&linkeddir, p.join("link2dir"))?;
}
for f in &["somefile", "otherfile"] {
fswrite(td.join("foo/bar").join(f), f)?;
fswrite(td.join("blah").join(f), f)?;
fswrite(td.join("blah/moo").join(f), f)?;
}
assert!(d.remove_all("t").context("removing t")?);
assert!(!d.exists("t")?);
assert!(!d.remove_all("nosuchfile").context("removing nosuchfile")?);
let l = tempdir.path().join("somelink");
let regf = tempdir.path().join("regfile");
fswrite(®f, "some file contents")?;
symlink("regfile", &l)?;
assert!(d.remove_all("somelink")?);
assert!(!d.remove_all("somelink")?);
assert!(d.exists("regfile")?);
assert!(d.remove_all("regfile")?);
assert!(!d.exists("regfile")?);
Ok(())
}
fn assert_file_contents<P: AsRef<std::path::Path>>(
d: &openat::Dir,
p: P,
contents: &str,
) -> Result<()> {
let mut buf = String::new();
let _ = d.open_file(p.as_ref())?.read_to_string(&mut buf)?;
assert_eq!(buf, contents);
Ok(())
}
#[test]
fn copy_path() -> Result<()> {
let td = tempfile::tempdir()?;
let td = &openat::Dir::open(td.path())?;
// Copy plain src to dest in same dir
td.write_file_contents("srcf", 0o644, "src contents")?;
td.copy_file("srcf", "destf").context("copy1")?;
assert_file_contents(td, "destf", "src contents")?;
// Overwrite
td.write_file_contents("srcf", 0o644, "src contents v2")?;
td.copy_file("srcf", "destf").context("copy overwrite")?;
assert_file_contents(td, "destf", "src contents v2")?;
// Copy across subdirectories
td.ensure_dir_all("sub1.1/sub2", 0o755)?;
td.ensure_dir_all("sub1.2/sub2", 0o755)?;
td.write_file_contents("sub1.1/sub2/blah", 0o600, "somesecretvalue")?;
let subtarget = "sub1.2/sub2/blahcopy";
td.copy_file("sub1.1/sub2/blah", subtarget)
.context("copy2")?;
assert_file_contents(td, subtarget, "somesecretvalue")?;
assert_eq!(td.metadata(subtarget)?.stat().st_mode & 0o777, 0o600);
// We don't follow links by default
td.symlink("somelink", "srcf")?;
assert!(td.copy_file("somelink", "srcf-from-link").is_err());
// Nonexistent file
assert!(td.copy_file("enoent", "nosuchdest").is_err());
// Directory
assert!(td.copy_file("sub1.1", "nosuchdest").is_err());
Ok(())
}
| 30.988327 | 77 | 0.589653 |
cce01c04a567b51a6b7558ad19dd1be7617db66d | 4,972 | use crate::interactive::{
widgets::{
Entries, EntriesProps, Footer, FooterProps, Header, HelpPane, HelpPaneProps, MarkPane,
MarkPaneProps, COLOR_MARKED,
},
AppState, DisplayOptions, FocussedPane,
};
use dua::traverse::Traversal;
use std::borrow::Borrow;
use tui::{
buffer::Buffer,
layout::{Constraint, Direction, Layout, Rect},
style::Modifier,
style::{Color, Style},
};
use Constraint::*;
use FocussedPane::*;
pub struct MainWindowProps<'a> {
pub traversal: &'a Traversal,
pub display: DisplayOptions,
pub state: &'a AppState,
}
#[derive(Default)]
pub struct MainWindow {
pub help_pane: Option<HelpPane>,
pub entries_pane: Entries,
pub mark_pane: Option<MarkPane>,
}
impl MainWindow {
pub fn render<'a>(
&mut self,
props: impl Borrow<MainWindowProps<'a>>,
area: Rect,
buf: &mut Buffer,
) {
let MainWindowProps {
traversal:
Traversal {
tree,
entries_traversed,
total_bytes,
..
},
display,
state,
} = props.borrow();
let (entries_style, help_style, mark_style) = {
let grey = Style {
fg: Color::DarkGray.into(),
bg: Color::Reset.into(),
add_modifier: Modifier::empty(),
..Style::default()
};
let bold = Style {
fg: Color::Rgb(230, 230, 230).into(),
add_modifier: Modifier::BOLD,
..grey
};
match state.focussed {
Main => (bold, grey, grey),
Help => (grey, bold, grey),
Mark => (grey, grey, bold),
}
};
let (header_area, entries_area, footer_area) = {
let regions = Layout::default()
.direction(Direction::Vertical)
.constraints([Length(1), Max(256), Length(1)].as_ref())
.split(area);
(regions[0], regions[1], regions[2])
};
{
let marked = self.mark_pane.as_ref().map(|p| p.marked());
let bg_color = match (marked.map_or(true, |m| m.is_empty()), state.focussed) {
(false, FocussedPane::Mark) => Color::LightRed,
(false, _) => COLOR_MARKED,
(_, _) => Color::White,
};
Header.render(bg_color, header_area, buf);
}
let (entries_area, help_pane, mark_pane) = {
let regions = Layout::default()
.direction(Direction::Horizontal)
.constraints([Percentage(50), Percentage(50)].as_ref())
.split(entries_area);
let (left_pane, right_pane) = (regions[0], regions[1]);
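            // A single auxiliary pane takes the whole right half; when both
            // the help and mark panes are open, the right half is split
            // vertically between them.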
match (&mut self.help_pane, &mut self.mark_pane) {
(Some(ref mut pane), None) => (left_pane, Some((right_pane, pane)), None),
(None, Some(ref mut pane)) => (left_pane, None, Some((right_pane, pane))),
(Some(ref mut help), Some(ref mut mark)) => {
let regions = Layout::default()
.direction(Direction::Vertical)
.constraints([Percentage(50), Percentage(50)].as_ref())
.split(right_pane);
(
left_pane,
Some((regions[0], help)),
Some((regions[1], mark)),
)
}
(None, None) => (entries_area, None, None),
}
};
if let Some((mark_area, pane)) = mark_pane {
let props = MarkPaneProps {
border_style: mark_style,
format: display.byte_format,
};
pane.render(props, mark_area, buf);
}
if let Some((help_area, pane)) = help_pane {
let props = HelpPaneProps {
border_style: help_style,
has_focus: matches!(state.focussed, Help),
};
pane.render(props, help_area, buf);
}
let marked = self.mark_pane.as_ref().map(|p| p.marked());
let props = EntriesProps {
tree: &tree,
root: state.root,
display: *display,
entries: &state.entries,
marked,
selected: state.selected,
border_style: entries_style,
is_focussed: matches!(state.focussed, Main),
};
self.entries_pane.render(props, entries_area, buf);
Footer.render(
FooterProps {
total_bytes: *total_bytes,
format: display.byte_format,
entries_traversed: *entries_traversed,
message: state.message.clone(),
},
footer_area,
buf,
);
}
}
| 32.927152 | 94 | 0.488938 |
160b6640db082064c53dc4aad65c7b7303b61427 | 1,528 |
pub struct IconLocalPostOffice {
props: crate::Props,
}
impl yew::Component for IconLocalPostOffice {
type Properties = crate::Props;
type Message = ();
fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self
{
Self { props }
}
fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender
{
true
}
fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender
{
false
}
fn view(&self) -> yew::prelude::Html
{
yew::prelude::html! {
<svg
class=self.props.class.unwrap_or("")
width=self.props.size.unwrap_or(24).to_string()
height=self.props.size.unwrap_or(24).to_string()
viewBox="0 0 24 24"
fill=self.props.fill.unwrap_or("none")
stroke=self.props.color.unwrap_or("currentColor")
stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
>
<svg xmlns="http://www.w3.org/2000/svg" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0V0z" fill="none"/><path d="M12 11l8-5H4zM4 8v10h16V8l-8 5z" opacity=".3"/><path d="M20 4H4c-1.1 0-2 .9-2 2v12c0 1.1.9 2 2 2h16c1.1 0 2-.9 2-2V6c0-1.1-.9-2-2-2zm0 14H4V8l8 5 8-5v10zm-8-7L4 6h16l-8 5z"/></svg>
</svg>
}
}
}
| 33.217391 | 323 | 0.579843 |
2281c8681490cd221fd14476dbd59d14ac2a64b9 | 4,814 | use crate::{ CmdOracleCtx, CmdOracle, PrioQueue };
use crate::msg::{ Messages, pyld, inter, plain };
use crate::oracle;
use std::u8;
use snafu::{ Snafu, ResultExt, ensure };
use crossbeam::thread;
use crossbeam::thread::ScopedJoinHandle;
type Result<T, E = Error> = std::result::Result<T, E>;
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display("error in oracle: {}", source))]
Oracle { source: oracle::Error },
#[snafu(display("tried all 256 bytes without success"))]
Tries,
#[snafu(display("invalid input: {}", reason))]
Input { reason: String }
}
#[derive(Debug)]
pub struct Dec {
pub intermediate: Vec<u8>,
pub plain: Vec<u8>
}
impl Dec {
pub fn new(intermediate: Vec<u8>, plain: Vec<u8>) -> Self {
Dec { intermediate, plain }
}
}
type DecResult = Result<Dec>;
trait ProgressCb = Fn(Messages) + Sync + Send;
struct BlockDecypt<'a, F: ProgressCb> {
b_pyld: Vec<u8>,
b_inter: Vec<u8>,
b_plain: Vec<u8>,
iv: &'a [u8],
oracle: CmdOracle,
chars: &'a PrioQueue,
last: bool,
block: usize,
blksz: u8,
cb: &'a F
}
// yes, that's how lazy i am
macro_rules! i { ($x:expr) => { $x as usize }; }
impl<'a, F: ProgressCb> BlockDecypt<'a, F> {
pub fn new(blk: &[u8], iv: &'a [u8],
oracle: CmdOracle,
chars: &'a PrioQueue,
cb: &'a F, block: usize,
last: bool) -> Result<Self> {
ensure!(blk.len() <= u8::MAX as usize,
Input { reason: "blocksize must be below 256" });
let blksz = blk.len();
let mut b_pyld = vec![0u8; blksz * 2];
b_pyld[blksz..].copy_from_slice(blk);
Ok(BlockDecypt {
b_inter: vec![0u8; blksz],
b_plain: vec![0u8; blksz],
blksz: blksz as u8,
b_pyld, iv, oracle, chars, last, cb, block
})
}
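    // Classic CBC padding-oracle step: to test a guess `b` for plaintext byte
    // `i`, the forged previous-block byte is set to b ^ pad ^ iv[i]. The
    // target then decrypts that position to real_plain[i] ^ b ^ pad, which is
    // valid `pad` padding exactly when `b` equals the real plaintext byte.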
fn decrypt_byte(&mut self, i: u8, mut chars: impl Iterator<Item = u8>)
-> Result<u8, Error> {
let pad = self.blksz - i;
self.b_pyld.iter_mut()
.zip(self.b_inter.iter())
.skip(i as usize +1)
.for_each(|(p, i)| *p = pad ^ i);
chars.try_find(|b| {
self.b_pyld[i!(i)] = *b ^ (pad ^ self.iv[i!(i)]);
(self.cb)(pyld(self.b_pyld[0..i!(self.blksz)].to_vec(),
i as u8, self.block));
self.oracle.request(&self.b_pyld).context(Oracle)
})?
.ok_or(Error::Tries)
}
fn skip_pad(&mut self) -> Result<u8, Error> {
match self.last {
true => {
let res = self.decrypt_byte(self.blksz -1, 1..self.blksz -1)?;
(self.blksz - res .. self.blksz)
.map(|i| i as usize)
.for_each(|i| {
self.b_inter[i] = res ^ self.iv[i];
self.b_plain[i] = res;
});
Ok(self.blksz - res)
},
false => Ok(self.blksz)
}
}
pub fn decrypt(mut self) -> DecResult {
for i in (0 .. self.skip_pad()?).rev() {
let res = self.decrypt_byte(i, self.chars.iter())?;
self.b_inter[i!(i)] = res ^ self.iv[i!(i)];
self.b_plain[i!(i)] = res;
self.chars.hit(res);
(self.cb)(inter(self.b_inter.clone(), i, self.block));
(self.cb)(plain(self.b_plain.clone(), i, self.block));
}
Ok(Dec::new(self.b_inter, self.b_plain))
}
}
pub fn decrypt<F>(cipher: &[u8],
blksz: u8,
oracle: &CmdOracleCtx,
prog: F,
chars: &[u8; 256],
iv: bool) -> Vec<DecResult>
where F: Fn(Messages) + Sync + Send {
let mut blocks = cipher
.chunks(blksz as usize)
.collect::<Vec<&[u8]>>();
let ivblk: Vec<u8>;
if !iv {
ivblk = vec![0; blksz as usize];
blocks.insert(0, &ivblk);
}
let chars = PrioQueue::new(chars.to_vec());
let res = thread::scope(|s| {
let blkc = blocks.len();
let chars = &chars;
let prog = &prog;
(1..blocks.len())
.map(|i| {
let blk1 = blocks[i];
let blk0 = blocks[i-1];
s.spawn(move |_|
BlockDecypt::new(blk1, blk0,
oracle.spawn().context(Oracle)?,
chars, prog, i -1,
i == blkc -1 && (iv || blkc > 2))?
.decrypt())})
.collect::<Vec<ScopedJoinHandle<_>>>()
.into_iter()
.map(|h| h.join().unwrap())
.collect()
}).unwrap();
(prog)(Messages::Done);
res
}
| 29.175758 | 78 | 0.477565 |
fe16aeeb95a5dcdeb5cf307529044233691ce96d | 58,982 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use super::{
data::{into_buffers, new_buffers},
ArrayData, ArrayDataBuilder,
};
use crate::array::StringOffsetSizeTrait;
use crate::{
buffer::MutableBuffer,
datatypes::DataType,
error::{ArrowError, Result},
util::bit_util,
};
use half::f16;
use std::mem;
mod boolean;
mod fixed_binary;
mod list;
mod null;
mod primitive;
mod structure;
mod utils;
mod variable_size;
type ExtendNullBits<'a> = Box<dyn Fn(&mut _MutableArrayData, usize, usize) + 'a>;
// function that extends `[start..start+len]` to the mutable array.
// this is dynamic because different data_types influence how buffers and children are extended.
type Extend<'a> = Box<dyn Fn(&mut _MutableArrayData, usize, usize, usize) + 'a>;
type ExtendNulls = Box<dyn Fn(&mut _MutableArrayData, usize)>;
/// A mutable [ArrayData] that knows how to freeze itself into an [ArrayData].
/// This is just a data container.
#[derive(Debug)]
struct _MutableArrayData<'a> {
pub data_type: DataType,
pub null_count: usize,
pub len: usize,
pub null_buffer: MutableBuffer,
    // The Arrow specification only allows up to 3 buffers (2 ignoring the nulls above).
    // Thus, we place them on the stack to avoid bounds checks and for greater data locality.
pub buffer1: MutableBuffer,
pub buffer2: MutableBuffer,
pub child_data: Vec<MutableArrayData<'a>>,
}
impl<'a> _MutableArrayData<'a> {
fn freeze(self, dictionary: Option<ArrayData>) -> ArrayDataBuilder {
let buffers = into_buffers(&self.data_type, self.buffer1, self.buffer2);
let child_data = match self.data_type {
DataType::Dictionary(_, _) => vec![dictionary.unwrap()],
_ => {
let mut child_data = Vec::with_capacity(self.child_data.len());
for child in self.child_data {
child_data.push(child.freeze());
}
child_data
}
};
let mut array_data_builder = ArrayDataBuilder::new(self.data_type)
.offset(0)
.len(self.len)
.null_count(self.null_count)
.buffers(buffers)
.child_data(child_data);
if self.null_count > 0 {
array_data_builder =
array_data_builder.null_bit_buffer(self.null_buffer.into());
}
array_data_builder
}
}
fn build_extend_null_bits(array: &ArrayData, use_nulls: bool) -> ExtendNullBits {
if let Some(bitmap) = array.null_bitmap() {
let bytes = bitmap.bits.as_slice();
Box::new(move |mutable, start, len| {
utils::resize_for_bits(&mut mutable.null_buffer, mutable.len + len);
mutable.null_count += crate::util::bit_mask::set_bits(
mutable.null_buffer.as_slice_mut(),
bytes,
mutable.len,
array.offset() + start,
len,
);
})
} else if use_nulls {
Box::new(|mutable, _, len| {
utils::resize_for_bits(&mut mutable.null_buffer, mutable.len + len);
let write_data = mutable.null_buffer.as_slice_mut();
let offset = mutable.len;
(0..len).for_each(|i| {
bit_util::set_bit(write_data, offset + i);
});
})
} else {
Box::new(|_, _, _| {})
}
}
/// Struct to efficiently and interactively create an [ArrayData] from an existing [ArrayData] by
/// copying chunks.
/// The main use case of this struct is to perform unary operations on arrays of arbitrary types, such as `filter` and `take`.
/// # Example:
///
/// ```
/// use arrow::{array::{Int32Array, Array, MutableArrayData}};
///
/// let array = Int32Array::from(vec![1, 2, 3, 4, 5]);
/// let array = array.data();
/// // Create a new `MutableArrayData` from an array and with a capacity of 4.
/// // Capacity here is equivalent to `Vec::with_capacity`
/// let arrays = vec![array];
/// let mut mutable = MutableArrayData::new(arrays, false, 4);
/// mutable.extend(0, 1, 3); // extend from the slice [1..3], [2,3]
/// mutable.extend(0, 0, 3); // extend from the slice [0..3], [1,2,3]
/// // `.freeze()` to convert `MutableArrayData` into a `ArrayData`.
/// let new_array = Int32Array::from(mutable.freeze());
/// assert_eq!(Int32Array::from(vec![2, 3, 1, 2, 3]), new_array);
/// ```
pub struct MutableArrayData<'a> {
#[allow(dead_code)]
arrays: Vec<&'a ArrayData>,
// The attributes in [_MutableArrayData] cannot be in [MutableArrayData] due to
// mutability invariants (interior mutability):
// [MutableArrayData] contains a function that can only mutate [_MutableArrayData], not
// [MutableArrayData] itself
data: _MutableArrayData<'a>,
// the child data of the `Array` in Dictionary arrays.
    // This is not stored in `_MutableArrayData` because these values are constant and only needed
// at the end, when freezing [_MutableArrayData].
dictionary: Option<ArrayData>,
// function used to extend values from arrays. This function's lifetime is bound to the array
// because it reads values from it.
extend_values: Vec<Extend<'a>>,
// function used to extend nulls from arrays. This function's lifetime is bound to the array
// because it reads nulls from it.
extend_null_bits: Vec<ExtendNullBits<'a>>,
// function used to extend nulls.
// this is independent of the arrays and therefore has no lifetime.
extend_nulls: ExtendNulls,
}
impl<'a> std::fmt::Debug for MutableArrayData<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
// ignores the closures.
f.debug_struct("MutableArrayData")
.field("data", &self.data)
.finish()
}
}
/// Builds an extend that adds `offset` to the source primitive.
/// Additionally validates that `max` fits into the underlying
/// primitive, returning `None` if not.
fn build_extend_dictionary(
array: &ArrayData,
offset: usize,
max: usize,
) -> Option<Extend> {
use crate::datatypes::*;
match array.data_type() {
DataType::Dictionary(child_data_type, _) => match child_data_type.as_ref() {
DataType::UInt8 => {
let _: u8 = max.try_into().ok()?;
let offset: u8 = offset.try_into().ok()?;
Some(primitive::build_extend_with_offset(array, offset))
}
DataType::UInt16 => {
let _: u16 = max.try_into().ok()?;
let offset: u16 = offset.try_into().ok()?;
Some(primitive::build_extend_with_offset(array, offset))
}
DataType::UInt32 => {
let _: u32 = max.try_into().ok()?;
let offset: u32 = offset.try_into().ok()?;
Some(primitive::build_extend_with_offset(array, offset))
}
DataType::UInt64 => {
let _: u64 = max.try_into().ok()?;
let offset: u64 = offset.try_into().ok()?;
Some(primitive::build_extend_with_offset(array, offset))
}
DataType::Int8 => {
let _: i8 = max.try_into().ok()?;
let offset: i8 = offset.try_into().ok()?;
Some(primitive::build_extend_with_offset(array, offset))
}
DataType::Int16 => {
let _: i16 = max.try_into().ok()?;
let offset: i16 = offset.try_into().ok()?;
Some(primitive::build_extend_with_offset(array, offset))
}
DataType::Int32 => {
let _: i32 = max.try_into().ok()?;
let offset: i32 = offset.try_into().ok()?;
Some(primitive::build_extend_with_offset(array, offset))
}
DataType::Int64 => {
let _: i64 = max.try_into().ok()?;
let offset: i64 = offset.try_into().ok()?;
Some(primitive::build_extend_with_offset(array, offset))
}
_ => unreachable!(),
},
_ => None,
}
}
fn build_extend(array: &ArrayData) -> Extend {
use crate::datatypes::*;
match array.data_type() {
DataType::Decimal(_, _) => primitive::build_extend::<i128>(array),
DataType::Null => null::build_extend(array),
DataType::Boolean => boolean::build_extend(array),
DataType::UInt8 => primitive::build_extend::<u8>(array),
DataType::UInt16 => primitive::build_extend::<u16>(array),
DataType::UInt32 => primitive::build_extend::<u32>(array),
DataType::UInt64 => primitive::build_extend::<u64>(array),
DataType::Int8 => primitive::build_extend::<i8>(array),
DataType::Int16 => primitive::build_extend::<i16>(array),
DataType::Int32 => primitive::build_extend::<i32>(array),
DataType::Int64 => primitive::build_extend::<i64>(array),
DataType::Float32 => primitive::build_extend::<f32>(array),
DataType::Float64 => primitive::build_extend::<f64>(array),
DataType::Date32
| DataType::Time32(_)
| DataType::Interval(IntervalUnit::YearMonth) => {
primitive::build_extend::<i32>(array)
}
DataType::Date64
| DataType::Time64(_)
| DataType::Timestamp(_, _)
| DataType::Duration(_)
| DataType::Interval(IntervalUnit::DayTime) => {
primitive::build_extend::<i64>(array)
}
DataType::Interval(IntervalUnit::MonthDayNano) => {
primitive::build_extend::<i128>(array)
}
DataType::Utf8 | DataType::Binary => variable_size::build_extend::<i32>(array),
DataType::LargeUtf8 | DataType::LargeBinary => {
variable_size::build_extend::<i64>(array)
}
DataType::Map(_, _) | DataType::List(_) => list::build_extend::<i32>(array),
DataType::LargeList(_) => list::build_extend::<i64>(array),
DataType::Dictionary(_, _) => unreachable!("should use build_extend_dictionary"),
DataType::Struct(_) => structure::build_extend(array),
DataType::FixedSizeBinary(_) => fixed_binary::build_extend(array),
DataType::Float16 => primitive::build_extend::<f16>(array),
/*
DataType::FixedSizeList(_, _) => {}
DataType::Union(_) => {}
*/
ty => todo!(
"Take and filter operations still not supported for this datatype: `{:?}`",
ty
),
}
}
fn build_extend_nulls(data_type: &DataType) -> ExtendNulls {
use crate::datatypes::*;
Box::new(match data_type {
DataType::Decimal(_, _) => primitive::extend_nulls::<i128>,
DataType::Null => null::extend_nulls,
DataType::Boolean => boolean::extend_nulls,
DataType::UInt8 => primitive::extend_nulls::<u8>,
DataType::UInt16 => primitive::extend_nulls::<u16>,
DataType::UInt32 => primitive::extend_nulls::<u32>,
DataType::UInt64 => primitive::extend_nulls::<u64>,
DataType::Int8 => primitive::extend_nulls::<i8>,
DataType::Int16 => primitive::extend_nulls::<i16>,
DataType::Int32 => primitive::extend_nulls::<i32>,
DataType::Int64 => primitive::extend_nulls::<i64>,
DataType::Float32 => primitive::extend_nulls::<f32>,
DataType::Float64 => primitive::extend_nulls::<f64>,
DataType::Date32
| DataType::Time32(_)
| DataType::Interval(IntervalUnit::YearMonth) => primitive::extend_nulls::<i32>,
DataType::Date64
| DataType::Time64(_)
| DataType::Timestamp(_, _)
| DataType::Duration(_)
| DataType::Interval(IntervalUnit::DayTime) => primitive::extend_nulls::<i64>,
DataType::Interval(IntervalUnit::MonthDayNano) => primitive::extend_nulls::<i128>,
DataType::Utf8 | DataType::Binary => variable_size::extend_nulls::<i32>,
DataType::LargeUtf8 | DataType::LargeBinary => variable_size::extend_nulls::<i64>,
DataType::Map(_, _) | DataType::List(_) => list::extend_nulls::<i32>,
DataType::LargeList(_) => list::extend_nulls::<i64>,
DataType::Dictionary(child_data_type, _) => match child_data_type.as_ref() {
DataType::UInt8 => primitive::extend_nulls::<u8>,
DataType::UInt16 => primitive::extend_nulls::<u16>,
DataType::UInt32 => primitive::extend_nulls::<u32>,
DataType::UInt64 => primitive::extend_nulls::<u64>,
DataType::Int8 => primitive::extend_nulls::<i8>,
DataType::Int16 => primitive::extend_nulls::<i16>,
DataType::Int32 => primitive::extend_nulls::<i32>,
DataType::Int64 => primitive::extend_nulls::<i64>,
_ => unreachable!(),
},
DataType::Struct(_) => structure::extend_nulls,
DataType::FixedSizeBinary(_) => fixed_binary::extend_nulls,
DataType::Float16 => primitive::extend_nulls::<f16>,
/*
DataType::FixedSizeList(_, _) => {}
DataType::Union(_) => {}
*/
ty => todo!(
"Take and filter operations still not supported for this datatype: `{:?}`",
ty
),
})
}
fn preallocate_offset_and_binary_buffer<Offset: StringOffsetSizeTrait>(
capacity: usize,
binary_size: usize,
) -> [MutableBuffer; 2] {
// offsets
let mut buffer = MutableBuffer::new((1 + capacity) * mem::size_of::<Offset>());
// safety: `unsafe` code assumes that this buffer is initialized with one element
if Offset::is_large() {
buffer.push(0i64);
} else {
buffer.push(0i32)
}
[
buffer,
MutableBuffer::new(binary_size * mem::size_of::<u8>()),
]
}
/// Define capacities of child data or data buffers.
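///
/// For example, a `Utf8` build expecting ~100 strings and ~1 KiB of string
/// bytes can be pre-sized with `Capacities::Binary(100, Some(1024))` via
/// [MutableArrayData::with_capacities].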
#[derive(Debug, Clone)]
pub enum Capacities {
/// Binary, Utf8 and LargeUtf8 data types
/// Define
/// * the capacity of the array offsets
/// * the capacity of the binary/ str buffer
Binary(usize, Option<usize>),
/// List and LargeList data types
/// Define
/// * the capacity of the array offsets
/// * the capacity of the child data
List(usize, Option<Box<Capacities>>),
/// Struct type
/// * the capacity of the array
/// * the capacities of the fields
Struct(usize, Option<Vec<Capacities>>),
/// Dictionary type
/// * the capacity of the array/keys
/// * the capacity of the values
Dictionary(usize, Option<Box<Capacities>>),
/// Don't preallocate inner buffers and rely on array growth strategy
Array(usize),
}
impl<'a> MutableArrayData<'a> {
/// returns a new [MutableArrayData] with capacity to `capacity` slots and specialized to create an
/// [ArrayData] from multiple `arrays`.
///
/// `use_nulls` is a flag used to optimize insertions. It should be `false` if the only source of nulls
    /// is the arrays themselves and `true` if the user plans to call [MutableArrayData::extend_nulls].
/// In other words, if `use_nulls` is `false`, calling [MutableArrayData::extend_nulls] should not be used.
pub fn new(arrays: Vec<&'a ArrayData>, use_nulls: bool, capacity: usize) -> Self {
Self::with_capacities(arrays, use_nulls, Capacities::Array(capacity))
}
/// Similar to [MutableArrayData::new], but lets users define the preallocated capacities of the array.
/// See also [MutableArrayData::new] for more information on the arguments.
///
/// # Panic
/// This function panics if the given `capacities` don't match the data type of `arrays`. Or when
/// a [Capacities] variant is not yet supported.
pub fn with_capacities(
arrays: Vec<&'a ArrayData>,
mut use_nulls: bool,
capacities: Capacities,
) -> Self {
let data_type = arrays[0].data_type();
use crate::datatypes::*;
// if any of the arrays has nulls, insertions from any array requires setting bits
// as there is at least one array with nulls.
if arrays.iter().any(|array| array.null_count() > 0) {
use_nulls = true;
};
let mut array_capacity;
let [buffer1, buffer2] = match (data_type, &capacities) {
(DataType::LargeUtf8, Capacities::Binary(capacity, Some(value_cap)))
| (DataType::LargeBinary, Capacities::Binary(capacity, Some(value_cap))) => {
array_capacity = *capacity;
preallocate_offset_and_binary_buffer::<i64>(*capacity, *value_cap)
}
(DataType::Utf8, Capacities::Binary(capacity, Some(value_cap)))
| (DataType::Binary, Capacities::Binary(capacity, Some(value_cap))) => {
array_capacity = *capacity;
preallocate_offset_and_binary_buffer::<i32>(*capacity, *value_cap)
}
(_, Capacities::Array(capacity)) => {
array_capacity = *capacity;
new_buffers(data_type, *capacity)
}
_ => panic!("Capacities: {:?} not yet supported", capacities),
};
let child_data = match &data_type {
DataType::Decimal(_, _)
| DataType::Null
| DataType::Boolean
| DataType::UInt8
| DataType::UInt16
| DataType::UInt32
| DataType::UInt64
| DataType::Int8
| DataType::Int16
| DataType::Int32
| DataType::Int64
| DataType::Float16
| DataType::Float32
| DataType::Float64
| DataType::Date32
| DataType::Date64
| DataType::Time32(_)
| DataType::Time64(_)
| DataType::Duration(_)
| DataType::Timestamp(_, _)
| DataType::Utf8
| DataType::Binary
| DataType::LargeUtf8
| DataType::LargeBinary
| DataType::Interval(_)
| DataType::FixedSizeBinary(_) => vec![],
DataType::Map(_, _) | DataType::List(_) | DataType::LargeList(_) => {
let childs = arrays
.iter()
.map(|array| &array.child_data()[0])
.collect::<Vec<_>>();
let capacities = if let Capacities::List(capacity, ref child_capacities) =
capacities
{
array_capacity = capacity;
child_capacities
.clone()
.map(|c| *c)
.unwrap_or(Capacities::Array(array_capacity))
} else {
Capacities::Array(array_capacity)
};
vec![MutableArrayData::with_capacities(
children, use_nulls, capacities,
)]
}
// the dictionary type just appends keys and clones the values.
DataType::Dictionary(_, _) => vec![],
DataType::Struct(fields) => match capacities {
Capacities::Struct(capacity, Some(ref child_capacities)) => {
array_capacity = capacity;
(0..fields.len())
.zip(child_capacities)
.map(|(i, child_cap)| {
let child_arrays = arrays
.iter()
.map(|array| &array.child_data()[i])
.collect::<Vec<_>>();
MutableArrayData::with_capacities(
child_arrays,
use_nulls,
child_cap.clone(),
)
})
.collect::<Vec<_>>()
}
Capacities::Struct(capacity, None) => {
array_capacity = capacity;
(0..fields.len())
.map(|i| {
let child_arrays = arrays
.iter()
.map(|array| &array.child_data()[i])
.collect::<Vec<_>>();
MutableArrayData::new(child_arrays, use_nulls, capacity)
})
.collect::<Vec<_>>()
}
_ => (0..fields.len())
.map(|i| {
let child_arrays = arrays
.iter()
.map(|array| &array.child_data()[i])
.collect::<Vec<_>>();
MutableArrayData::new(child_arrays, use_nulls, array_capacity)
})
.collect::<Vec<_>>(),
},
ty => {
todo!("Take and filter operations still not supported for this datatype: `{:?}`", ty)
}
};
// Get the dictionary, if any, and whether it is a concatenation of multiple dictionaries
let (dictionary, dict_concat) = match &data_type {
DataType::Dictionary(_, _) => {
// If more than one dictionary, concatenate dictionaries together
let dict_concat = !arrays
.windows(2)
.all(|a| a[0].child_data()[0].ptr_eq(&a[1].child_data()[0]));
match dict_concat {
false => (Some(arrays[0].child_data()[0].clone()), false),
true => {
if let Capacities::Dictionary(_, _) = capacities {
panic!("dictionary capacity not yet supported")
}
let dictionaries: Vec<_> =
arrays.iter().map(|array| &array.child_data()[0]).collect();
let lengths: Vec<_> = dictionaries
.iter()
.map(|dictionary| dictionary.len())
.collect();
let capacity = lengths.iter().sum();
let mut mutable =
MutableArrayData::new(dictionaries, false, capacity);
for (i, len) in lengths.iter().enumerate() {
mutable.extend(i, 0, *len)
}
(Some(mutable.freeze()), true)
}
}
}
_ => (None, false),
};
let extend_nulls = build_extend_nulls(data_type);
let extend_null_bits = arrays
.iter()
.map(|array| build_extend_null_bits(array, use_nulls))
.collect();
let null_buffer = if use_nulls {
let null_bytes = bit_util::ceil(array_capacity, 8);
MutableBuffer::from_len_zeroed(null_bytes)
} else {
// create 0 capacity mutable buffer with the intention that it won't be used
MutableBuffer::with_capacity(0)
};
let extend_values = match &data_type {
DataType::Dictionary(_, _) => {
let mut next_offset = 0;
let extend_values: Result<Vec<_>> = arrays
.iter()
.map(|array| {
let offset = next_offset;
let dict_len = array.child_data()[0].len();
if dict_concat {
next_offset += dict_len;
}
build_extend_dictionary(array, offset, offset + dict_len)
.ok_or(ArrowError::DictionaryKeyOverflowError)
})
.collect();
extend_values.expect("MutableArrayData::new is infallible")
}
_ => arrays.iter().map(|array| build_extend(array)).collect(),
};
let data = _MutableArrayData {
data_type: data_type.clone(),
len: 0,
null_count: 0,
null_buffer,
buffer1,
buffer2,
child_data,
};
Self {
arrays,
data,
dictionary,
extend_values,
extend_null_bits,
extend_nulls,
}
}
/// Extends this array with a chunk of its source arrays
///
/// # Arguments
/// * `index` - the index of the source array that you want to copy values from
/// * `start` - the start index of the chunk (inclusive)
/// * `end` - the end index of the chunk (exclusive)
///
/// # Panics
/// This function panics if the index is invalid,
/// i.e. `index` >= the number of source arrays
/// or `end` > the length of the `index`-th array
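///
/// # Example
/// A short sketch of copying two disjoint ranges from one source array:
/// ```
/// use arrow::array::{Array, Int32Array, MutableArrayData};
///
/// let src = Int32Array::from(vec![10, 20, 30]);
/// let mut mutable = MutableArrayData::new(vec![src.data()], false, 2);
/// mutable.extend(0, 0, 1); // copy src[0]
/// mutable.extend(0, 2, 3); // copy src[2]
/// assert_eq!(Int32Array::from(mutable.freeze()), Int32Array::from(vec![10, 30]));
/// ```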
pub fn extend(&mut self, index: usize, start: usize, end: usize) {
let len = end - start;
(self.extend_null_bits[index])(&mut self.data, start, len);
(self.extend_values[index])(&mut self.data, index, start, len);
self.data.len += len;
}
/// Extends this [MutableArrayData] with null elements, disregarding the bound arrays
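///
/// # Example
/// A sketch; note that `use_nulls` must be `true` when this method will be called:
/// ```
/// use arrow::array::{Array, Int32Array, MutableArrayData};
///
/// let src = Int32Array::from(vec![1]);
/// let mut mutable = MutableArrayData::new(vec![src.data()], true, 3);
/// mutable.extend(0, 0, 1);
/// mutable.extend_nulls(2);
/// let copied = Int32Array::from(mutable.freeze());
/// assert_eq!(copied, Int32Array::from(vec![Some(1), None, None]));
/// ```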
pub fn extend_nulls(&mut self, len: usize) {
// TODO: null_buffer should probably be extended here as well
// otherwise is_valid() could later panic
// add test to confirm
self.data.null_count += len;
(self.extend_nulls)(&mut self.data, len);
self.data.len += len;
}
/// Returns the current length
#[inline]
pub fn len(&self) -> usize {
self.data.len
}
/// Returns true if len is 0
#[inline]
pub fn is_empty(&self) -> bool {
self.data.len == 0
}
/// Returns the current null count
#[inline]
pub fn null_count(&self) -> usize {
self.data.null_count
}
/// Creates an [ArrayData] from the pushed regions up to this point, consuming `self`.
pub fn freeze(self) -> ArrayData {
unsafe { self.data.freeze(self.dictionary).build_unchecked() }
}
/// Creates an [ArrayDataBuilder] from the pushed regions up to this point, consuming `self`.
/// This is useful for extending the default behavior of [MutableArrayData].
pub fn into_builder(self) -> ArrayDataBuilder {
self.data.freeze(self.dictionary)
}
}
#[cfg(test)]
mod tests {
use std::{convert::TryFrom, sync::Arc};
use super::*;
use crate::array::DecimalArray;
use crate::{
array::{
Array, ArrayData, ArrayRef, BooleanArray, DictionaryArray,
FixedSizeBinaryArray, Int16Array, Int16Type, Int32Array, Int64Array,
Int64Builder, ListBuilder, MapBuilder, NullArray, PrimitiveBuilder,
StringArray, StringDictionaryBuilder, StructArray, UInt8Array,
},
buffer::Buffer,
datatypes::Field,
};
use crate::{
array::{ListArray, StringBuilder},
error::Result,
};
fn create_decimal_array(
array: &[Option<i128>],
precision: usize,
scale: usize,
) -> DecimalArray {
array
.iter()
.collect::<DecimalArray>()
.with_precision_and_scale(precision, scale)
.unwrap()
}
#[test]
fn test_decimal() {
let decimal_array =
create_decimal_array(&[Some(1), Some(2), None, Some(3)], 10, 3);
let arrays = vec![decimal_array.data()];
let mut a = MutableArrayData::new(arrays, true, 3);
a.extend(0, 0, 3);
a.extend(0, 2, 3);
let result = a.freeze();
let array = DecimalArray::from(result);
let expected = create_decimal_array(&[Some(1), Some(2), None, None], 10, 3);
assert_eq!(array, expected);
}
#[test]
fn test_decimal_offset() {
let decimal_array =
create_decimal_array(&[Some(1), Some(2), None, Some(3)], 10, 3);
let decimal_array = decimal_array.slice(1, 3); // 2, null, 3
let arrays = vec![decimal_array.data()];
let mut a = MutableArrayData::new(arrays, true, 2);
a.extend(0, 0, 2); // 2, null
let result = a.freeze();
let array = DecimalArray::from(result);
let expected = create_decimal_array(&[Some(2), None], 10, 3);
assert_eq!(array, expected);
}
#[test]
fn test_decimal_null_offset_nulls() {
let decimal_array =
create_decimal_array(&[Some(1), Some(2), None, Some(3)], 10, 3);
let decimal_array = decimal_array.slice(1, 3); // 2, null, 3
let arrays = vec![decimal_array.data()];
let mut a = MutableArrayData::new(arrays, true, 2);
a.extend(0, 0, 2); // 2, null
a.extend_nulls(3); // 2, null, null, null, null
a.extend(0, 1, 3); //2, null, null, null, null, null, 3
let result = a.freeze();
let array = DecimalArray::from(result);
let expected = create_decimal_array(
&[Some(2), None, None, None, None, None, Some(3)],
10,
3,
);
assert_eq!(array, expected);
}
/// tests extending from a primitive array without an offset or nulls
#[test]
fn test_primitive() {
let b = UInt8Array::from(vec![Some(1), Some(2), Some(3)]);
let arrays = vec![b.data()];
let mut a = MutableArrayData::new(arrays, false, 3);
a.extend(0, 0, 2);
let result = a.freeze();
let array = UInt8Array::from(result);
let expected = UInt8Array::from(vec![Some(1), Some(2)]);
assert_eq!(array, expected);
}
/// tests extending from a primitive array with an offset and no nulls
#[test]
fn test_primitive_offset() {
let b = UInt8Array::from(vec![Some(1), Some(2), Some(3)]);
let b = b.slice(1, 2);
let arrays = vec![b.data()];
let mut a = MutableArrayData::new(arrays, false, 2);
a.extend(0, 0, 2);
let result = a.freeze();
let array = UInt8Array::from(result);
let expected = UInt8Array::from(vec![Some(2), Some(3)]);
assert_eq!(array, expected);
}
/// tests extending from a primitive array with offset and nulls
#[test]
fn test_primitive_null_offset() {
let b = UInt8Array::from(vec![Some(1), None, Some(3)]);
let b = b.slice(1, 2);
let arrays = vec![b.data()];
let mut a = MutableArrayData::new(arrays, false, 2);
a.extend(0, 0, 2);
let result = a.freeze();
let array = UInt8Array::from(result);
let expected = UInt8Array::from(vec![None, Some(3)]);
assert_eq!(array, expected);
}
#[test]
fn test_primitive_null_offset_nulls() {
let b = UInt8Array::from(vec![Some(1), Some(2), Some(3)]);
let b = b.slice(1, 2);
let arrays = vec![b.data()];
let mut a = MutableArrayData::new(arrays, true, 2);
a.extend(0, 0, 2);
a.extend_nulls(3);
a.extend(0, 1, 2);
let result = a.freeze();
let array = UInt8Array::from(result);
let expected =
UInt8Array::from(vec![Some(2), Some(3), None, None, None, Some(3)]);
assert_eq!(array, expected);
}
#[test]
fn test_list_null_offset() -> Result<()> {
let int_builder = Int64Builder::new(24);
let mut builder = ListBuilder::<Int64Builder>::new(int_builder);
builder.values().append_slice(&[1, 2, 3])?;
builder.append(true)?;
builder.values().append_slice(&[4, 5])?;
builder.append(true)?;
builder.values().append_slice(&[6, 7, 8])?;
builder.append(true)?;
let array = builder.finish();
let arrays = vec![array.data()];
let mut mutable = MutableArrayData::new(arrays, false, 0);
mutable.extend(0, 0, 1);
let result = mutable.freeze();
let array = ListArray::from(result);
let int_builder = Int64Builder::new(24);
let mut builder = ListBuilder::<Int64Builder>::new(int_builder);
builder.values().append_slice(&[1, 2, 3])?;
builder.append(true)?;
let expected = builder.finish();
assert_eq!(array, expected);
Ok(())
}
/// tests extending from a variable-sized (strings and binary) array with nulls and no offset
#[test]
fn test_variable_sized_nulls() {
let array = StringArray::from(vec![Some("a"), Some("bc"), None, Some("defh")]);
let arrays = vec![array.data()];
let mut mutable = MutableArrayData::new(arrays, false, 0);
mutable.extend(0, 1, 3);
let result = mutable.freeze();
let result = StringArray::from(result);
let expected = StringArray::from(vec![Some("bc"), None]);
assert_eq!(result, expected);
}
/// tests extending from a variable-sized (strings and binary) array
/// with an offset and nulls
#[test]
fn test_variable_sized_offsets() {
let array = StringArray::from(vec![Some("a"), Some("bc"), None, Some("defh")]);
let array = array.slice(1, 3);
let arrays = vec![array.data()];
let mut mutable = MutableArrayData::new(arrays, false, 0);
mutable.extend(0, 0, 3);
let result = mutable.freeze();
let result = StringArray::from(result);
let expected = StringArray::from(vec![Some("bc"), None, Some("defh")]);
assert_eq!(result, expected);
}
#[test]
fn test_string_offsets() {
let array = StringArray::from(vec![Some("a"), Some("bc"), None, Some("defh")]);
let array = array.slice(1, 3);
let arrays = vec![array.data()];
let mut mutable = MutableArrayData::new(arrays, false, 0);
mutable.extend(0, 0, 3);
let result = mutable.freeze();
let result = StringArray::from(result);
let expected = StringArray::from(vec![Some("bc"), None, Some("defh")]);
assert_eq!(result, expected);
}
#[test]
fn test_multiple_with_nulls() {
let array1 = StringArray::from(vec!["hello", "world"]);
let array2 = StringArray::from(vec![Some("1"), None]);
let arrays = vec![array1.data(), array2.data()];
let mut mutable = MutableArrayData::new(arrays, false, 5);
mutable.extend(0, 0, 2);
mutable.extend(1, 0, 2);
let result = mutable.freeze();
let result = StringArray::from(result);
let expected =
StringArray::from(vec![Some("hello"), Some("world"), Some("1"), None]);
assert_eq!(result, expected);
}
#[test]
fn test_string_null_offset_nulls() {
let array = StringArray::from(vec![Some("a"), Some("bc"), None, Some("defh")]);
let array = array.slice(1, 3);
let arrays = vec![array.data()];
let mut mutable = MutableArrayData::new(arrays, true, 0);
mutable.extend(0, 1, 3);
mutable.extend_nulls(1);
let result = mutable.freeze();
let result = StringArray::from(result);
let expected = StringArray::from(vec![None, Some("defh"), None]);
assert_eq!(result, expected);
}
#[test]
fn test_bool() {
let array = BooleanArray::from(vec![Some(false), Some(true), None, Some(false)]);
let arrays = vec![array.data()];
let mut mutable = MutableArrayData::new(arrays, false, 0);
mutable.extend(0, 1, 3);
let result = mutable.freeze();
let result = BooleanArray::from(result);
let expected = BooleanArray::from(vec![Some(true), None]);
assert_eq!(result, expected);
}
#[test]
fn test_null() {
let array1 = NullArray::new(10);
let array2 = NullArray::new(5);
let arrays = vec![array1.data(), array2.data()];
let mut mutable = MutableArrayData::new(arrays, false, 0);
mutable.extend(0, 1, 3);
mutable.extend(1, 0, 1);
let result = mutable.freeze();
let result = NullArray::from(result);
let expected = NullArray::new(3);
assert_eq!(result, expected);
}
fn create_dictionary_array(values: &[&str], keys: &[Option<&str>]) -> ArrayData {
let values = StringArray::from(values.to_vec());
let mut builder = StringDictionaryBuilder::new_with_dictionary(
PrimitiveBuilder::<Int16Type>::new(3),
&values,
)
.unwrap();
for key in keys {
if let Some(v) = key {
builder.append(v).unwrap();
} else {
builder.append_null().unwrap()
}
}
builder.finish().data().clone()
}
#[test]
fn test_dictionary() {
// (a, b, c), (0, 1, 0, 2) => (a, b, a, c)
let array = create_dictionary_array(
&["a", "b", "c"],
&[Some("a"), Some("b"), None, Some("c")],
);
let arrays = vec![&array];
let mut mutable = MutableArrayData::new(arrays, false, 0);
mutable.extend(0, 1, 3);
let result = mutable.freeze();
let result = DictionaryArray::from(result);
let expected = Int16Array::from(vec![Some(1), None]);
assert_eq!(result.keys(), &expected);
}
#[test]
fn test_struct() {
let strings: ArrayRef = Arc::new(StringArray::from(vec![
Some("joe"),
None,
None,
Some("mark"),
Some("doe"),
]));
let ints: ArrayRef = Arc::new(Int32Array::from(vec![
Some(1),
Some(2),
Some(3),
Some(4),
Some(5),
]));
let array =
StructArray::try_from(vec![("f1", strings.clone()), ("f2", ints.clone())])
.unwrap();
let arrays = vec![array.data()];
let mut mutable = MutableArrayData::new(arrays, false, 0);
mutable.extend(0, 1, 3);
let data = mutable.freeze();
let array = StructArray::from(data);
let expected = StructArray::try_from(vec![
("f1", strings.slice(1, 2)),
("f2", ints.slice(1, 2)),
])
.unwrap();
assert_eq!(array, expected)
}
#[test]
fn test_struct_offset() {
let strings: ArrayRef = Arc::new(StringArray::from(vec![
Some("joe"),
None,
None,
Some("mark"),
Some("doe"),
]));
let ints: ArrayRef = Arc::new(Int32Array::from(vec![
Some(1),
Some(2),
Some(3),
Some(4),
Some(5),
]));
let array =
StructArray::try_from(vec![("f1", strings.clone()), ("f2", ints.clone())])
.unwrap()
.slice(1, 3);
let arrays = vec![array.data()];
let mut mutable = MutableArrayData::new(arrays, false, 0);
mutable.extend(0, 1, 3);
let data = mutable.freeze();
let array = StructArray::from(data);
let expected_strings: ArrayRef =
Arc::new(StringArray::from(vec![None, Some("mark")]));
let expected = StructArray::try_from(vec![
("f1", expected_strings),
("f2", ints.slice(2, 2)),
])
.unwrap();
assert_eq!(array, expected);
}
#[test]
fn test_struct_nulls() {
let strings: ArrayRef = Arc::new(StringArray::from(vec![
Some("joe"),
None,
None,
Some("mark"),
Some("doe"),
]));
let ints: ArrayRef = Arc::new(Int32Array::from(vec![
Some(1),
Some(2),
None,
Some(4),
Some(5),
]));
let array =
StructArray::try_from(vec![("f1", strings.clone()), ("f2", ints.clone())])
.unwrap();
let arrays = vec![array.data()];
let mut mutable = MutableArrayData::new(arrays, false, 0);
mutable.extend(0, 1, 3);
let data = mutable.freeze();
let array = StructArray::from(data);
let expected_string = Arc::new(StringArray::from(vec![None, None])) as ArrayRef;
let expected_int = Arc::new(Int32Array::from(vec![Some(2), None])) as ArrayRef;
let expected =
StructArray::try_from(vec![("f1", expected_string), ("f2", expected_int)])
.unwrap();
assert_eq!(array, expected)
}
#[test]
fn test_struct_many() {
let strings: ArrayRef = Arc::new(StringArray::from(vec![
Some("joe"),
None,
None,
Some("mark"),
Some("doe"),
]));
let ints: ArrayRef = Arc::new(Int32Array::from(vec![
Some(1),
Some(2),
None,
Some(4),
Some(5),
]));
let array =
StructArray::try_from(vec![("f1", strings.clone()), ("f2", ints.clone())])
.unwrap();
let arrays = vec![array.data(), array.data()];
let mut mutable = MutableArrayData::new(arrays, false, 0);
mutable.extend(0, 1, 3);
mutable.extend(1, 0, 2);
let data = mutable.freeze();
let array = StructArray::from(data);
let expected_string =
Arc::new(StringArray::from(vec![None, None, Some("joe"), None])) as ArrayRef;
let expected_int =
Arc::new(Int32Array::from(vec![Some(2), None, Some(1), Some(2)])) as ArrayRef;
let expected =
StructArray::try_from(vec![("f1", expected_string), ("f2", expected_int)])
.unwrap();
assert_eq!(array, expected)
}
#[test]
fn test_binary_fixed_sized_offsets() {
let array = FixedSizeBinaryArray::try_from_iter(
vec![vec![0, 0], vec![0, 1], vec![0, 2]].into_iter(),
)
.expect("Failed to create FixedSizeBinaryArray from iterable");
let array = array.slice(1, 2);
// = [[0, 1], [0, 2]] due to the offset = 1
let arrays = vec![array.data()];
let mut mutable = MutableArrayData::new(arrays, false, 0);
mutable.extend(0, 1, 2);
mutable.extend(0, 0, 1);
let result = mutable.freeze();
let result = FixedSizeBinaryArray::from(result);
let expected =
FixedSizeBinaryArray::try_from_iter(vec![vec![0, 2], vec![0, 1]].into_iter())
.expect("Failed to create FixedSizeBinaryArray from iterable");
assert_eq!(result, expected);
}
#[test]
fn test_list_append() -> Result<()> {
let mut builder = ListBuilder::<Int64Builder>::new(Int64Builder::new(24));
builder.values().append_slice(&[1, 2, 3])?;
builder.append(true)?;
builder.values().append_slice(&[4, 5])?;
builder.append(true)?;
builder.values().append_slice(&[6, 7, 8])?;
builder.values().append_slice(&[9, 10, 11])?;
builder.append(true)?;
let a = builder.finish();
let a_builder = Int64Builder::new(24);
let mut a_builder = ListBuilder::<Int64Builder>::new(a_builder);
a_builder.values().append_slice(&[12, 13])?;
a_builder.append(true)?;
a_builder.append(true)?;
a_builder.values().append_slice(&[14, 15])?;
a_builder.append(true)?;
let b = a_builder.finish();
let c = b.slice(1, 2);
let mut mutable =
MutableArrayData::new(vec![a.data(), b.data(), c.data()], false, 1);
mutable.extend(0, 0, a.len());
mutable.extend(1, 0, b.len());
mutable.extend(2, 0, c.len());
let finished = mutable.freeze();
let expected_int_array = Int64Array::from(vec![
Some(1),
Some(2),
Some(3),
Some(4),
Some(5),
Some(6),
Some(7),
Some(8),
Some(9),
Some(10),
Some(11),
// append first array
Some(12),
Some(13),
Some(14),
Some(15),
// append second array
Some(14),
Some(15),
]);
let list_value_offsets =
Buffer::from_slice_ref(&[0i32, 3, 5, 11, 13, 13, 15, 15, 17]);
let expected_list_data = ArrayData::try_new(
DataType::List(Box::new(Field::new("item", DataType::Int64, true))),
8,
None,
None,
0,
vec![list_value_offsets],
vec![expected_int_array.data().clone()],
)
.unwrap();
assert_eq!(finished, expected_list_data);
Ok(())
}
#[test]
fn test_list_nulls_append() -> Result<()> {
let mut builder = ListBuilder::<Int64Builder>::new(Int64Builder::new(32));
builder.values().append_slice(&[1, 2, 3])?;
builder.append(true)?;
builder.values().append_slice(&[4, 5])?;
builder.append(true)?;
builder.append(false)?;
builder.values().append_slice(&[6, 7, 8])?;
builder.values().append_null()?;
builder.values().append_null()?;
builder.values().append_slice(&[9, 10, 11])?;
builder.append(true)?;
let a = builder.finish();
let a = a.data();
let mut builder = ListBuilder::<Int64Builder>::new(Int64Builder::new(32));
builder.values().append_slice(&[12, 13])?;
builder.append(true)?;
builder.append(false)?;
builder.append(true)?;
builder.values().append_null()?;
builder.values().append_null()?;
builder.values().append_slice(&[14, 15])?;
builder.append(true)?;
let b = builder.finish();
let b = b.data();
let c = b.slice(1, 2);
let d = b.slice(2, 2);
let mut mutable = MutableArrayData::new(vec![a, b, &c, &d], false, 10);
mutable.extend(0, 0, a.len());
mutable.extend(1, 0, b.len());
mutable.extend(2, 0, c.len());
mutable.extend(3, 0, d.len());
let result = mutable.freeze();
let expected_int_array = Int64Array::from(vec![
Some(1),
Some(2),
Some(3),
Some(4),
Some(5),
Some(6),
Some(7),
Some(8),
None,
None,
Some(9),
Some(10),
Some(11),
// second array
Some(12),
Some(13),
None,
None,
Some(14),
Some(15),
// slice(1, 2) results in no values added
None,
None,
Some(14),
Some(15),
]);
let list_value_offsets =
Buffer::from_slice_ref(&[0, 3, 5, 5, 13, 15, 15, 15, 19, 19, 19, 19, 23]);
let expected_list_data = ArrayData::try_new(
DataType::List(Box::new(Field::new("item", DataType::Int64, true))),
12,
None,
Some(Buffer::from(&[0b11011011, 0b1110])),
0,
vec![list_value_offsets],
vec![expected_int_array.data().clone()],
)
.unwrap();
assert_eq!(result, expected_list_data);
Ok(())
}
#[test]
fn test_map_nulls_append() -> Result<()> {
let mut builder = MapBuilder::<Int64Builder, Int64Builder>::new(
None,
Int64Builder::new(32),
Int64Builder::new(32),
);
builder.keys().append_slice(&[1, 2, 3])?;
builder.values().append_slice(&[1, 2, 3])?;
builder.append(true)?;
builder.keys().append_slice(&[4, 5])?;
builder.values().append_slice(&[4, 5])?;
builder.append(true)?;
builder.append(false)?;
builder
.keys()
.append_slice(&[6, 7, 8, 100, 101, 9, 10, 11])?;
builder.values().append_slice(&[6, 7, 8])?;
builder.values().append_null()?;
builder.values().append_null()?;
builder.values().append_slice(&[9, 10, 11])?;
builder.append(true)?;
let a = builder.finish();
let a = a.data();
let mut builder = MapBuilder::<Int64Builder, Int64Builder>::new(
None,
Int64Builder::new(32),
Int64Builder::new(32),
);
builder.keys().append_slice(&[12, 13])?;
builder.values().append_slice(&[12, 13])?;
builder.append(true)?;
builder.append(false)?;
builder.append(true)?;
builder.keys().append_slice(&[100, 101, 14, 15])?;
builder.values().append_null()?;
builder.values().append_null()?;
builder.values().append_slice(&[14, 15])?;
builder.append(true)?;
let b = builder.finish();
let b = b.data();
let c = b.slice(1, 2);
let d = b.slice(2, 2);
let mut mutable = MutableArrayData::new(vec![a, b, &c, &d], false, 10);
mutable.extend(0, 0, a.len());
mutable.extend(1, 0, b.len());
mutable.extend(2, 0, c.len());
mutable.extend(3, 0, d.len());
let result = mutable.freeze();
let expected_key_array = Int64Array::from(vec![
Some(1),
Some(2),
Some(3),
Some(4),
Some(5),
Some(6),
Some(7),
Some(8),
Some(100),
Some(101),
Some(9),
Some(10),
Some(11),
// second array
Some(12),
Some(13),
Some(100),
Some(101),
Some(14),
Some(15),
// slice(1, 2) results in no values added
Some(100),
Some(101),
Some(14),
Some(15),
]);
let expected_value_array = Int64Array::from(vec![
Some(1),
Some(2),
Some(3),
Some(4),
Some(5),
Some(6),
Some(7),
Some(8),
None,
None,
Some(9),
Some(10),
Some(11),
// second array
Some(12),
Some(13),
None,
None,
Some(14),
Some(15),
// slice(1, 2) results in no values added
None,
None,
Some(14),
Some(15),
]);
let expected_entry_array = StructArray::from(vec![
(
Field::new("keys", DataType::Int64, false),
Arc::new(expected_key_array) as ArrayRef,
),
(
Field::new("values", DataType::Int64, true),
Arc::new(expected_value_array) as ArrayRef,
),
]);
let map_offsets =
Buffer::from_slice_ref(&[0, 3, 5, 5, 13, 15, 15, 15, 19, 19, 19, 19, 23]);
let expected_list_data = ArrayData::try_new(
DataType::Map(
Box::new(Field::new(
"entries",
DataType::Struct(vec![
Field::new("keys", DataType::Int64, false),
Field::new("values", DataType::Int64, true),
]),
false,
)),
false,
),
12,
None,
Some(Buffer::from(&[0b11011011, 0b1110])),
0,
vec![map_offsets],
vec![expected_entry_array.data().clone()],
)
.unwrap();
assert_eq!(result, expected_list_data);
Ok(())
}
#[test]
fn test_list_of_strings_append() -> Result<()> {
// [["alpha", "beta", None]]
let mut builder = ListBuilder::new(StringBuilder::new(32));
builder.values().append_value("Hello")?;
builder.values().append_value("Arrow")?;
builder.values().append_null()?;
builder.append(true)?;
let a = builder.finish();
// [["alpha", "beta"], [None], ["gamma", "delta", None]]
let mut builder = ListBuilder::new(StringBuilder::new(32));
builder.values().append_value("alpha")?;
builder.values().append_value("beta")?;
builder.append(true)?;
builder.values().append_null()?;
builder.append(true)?;
builder.values().append_value("gamma")?;
builder.values().append_value("delta")?;
builder.values().append_null()?;
builder.append(true)?;
let b = builder.finish();
let mut mutable = MutableArrayData::new(vec![a.data(), b.data()], false, 10);
mutable.extend(0, 0, a.len());
mutable.extend(1, 0, b.len());
mutable.extend(1, 1, 3);
mutable.extend(1, 0, 0);
let result = mutable.freeze();
let expected_string_array = StringArray::from(vec![
// extend a[0..a.len()]
// a[0]
Some("Hello"),
Some("Arrow"),
None,
// extend b[0..b.len()]
// b[0]
Some("alpha"),
Some("beta"),
// b[1]
None,
// b[2]
Some("gamma"),
Some("delta"),
None,
// extend b[1..3]
// b[1]
None,
// b[2]
Some("gamma"),
Some("delta"),
None,
// extend b[0..0]
]);
let list_value_offsets = Buffer::from_slice_ref(&[0, 3, 5, 6, 9, 10, 13]);
let expected_list_data = ArrayData::try_new(
DataType::List(Box::new(Field::new("item", DataType::Utf8, true))),
6,
None,
None,
0,
vec![list_value_offsets],
vec![expected_string_array.data().clone()],
)
.unwrap();
assert_eq!(result, expected_list_data);
Ok(())
}
#[test]
fn test_fixed_size_binary_append() {
let a = vec![Some(vec![1, 2]), Some(vec![3, 4]), Some(vec![5, 6])];
let a = FixedSizeBinaryArray::try_from_sparse_iter(a.into_iter())
.expect("Failed to create FixedSizeBinaryArray from iterable");
let b = vec![
None,
Some(vec![7, 8]),
Some(vec![9, 10]),
None,
Some(vec![13, 14]),
None,
];
let b = FixedSizeBinaryArray::try_from_sparse_iter(b.into_iter())
.expect("Failed to create FixedSizeBinaryArray from iterable");
let mut mutable = MutableArrayData::new(vec![a.data(), b.data()], false, 10);
mutable.extend(0, 0, a.len());
mutable.extend(1, 0, b.len());
mutable.extend(1, 1, 4);
mutable.extend(1, 2, 3);
mutable.extend(1, 5, 5);
let result = mutable.freeze();
let expected = vec![
// a
Some(vec![1, 2]),
Some(vec![3, 4]),
Some(vec![5, 6]),
// b
None,
Some(vec![7, 8]),
Some(vec![9, 10]),
None,
Some(vec![13, 14]),
None,
// b[1..4]
Some(vec![7, 8]),
Some(vec![9, 10]),
None,
// b[2..3]
Some(vec![9, 10]),
// b[4..4]
];
let expected = FixedSizeBinaryArray::try_from_sparse_iter(expected.into_iter())
.expect("Failed to create FixedSizeBinaryArray from iterable");
assert_eq!(&result, expected.data());
}
/*
// this is an old test for since-removed dead code; it will be useful again
// when `MutableArrayData` supports fixed-size lists.
#[test]
fn test_fixed_size_list_append() -> Result<()> {
let int_builder = UInt16Builder::new(64);
let mut builder = FixedSizeListBuilder::<UInt16Builder>::new(int_builder, 2);
builder.values().append_slice(&[1, 2])?;
builder.append(true)?;
builder.values().append_slice(&[3, 4])?;
builder.append(false)?;
builder.values().append_slice(&[5, 6])?;
builder.append(true)?;
let a_builder = UInt16Builder::new(64);
let mut a_builder = FixedSizeListBuilder::<UInt16Builder>::new(a_builder, 2);
a_builder.values().append_slice(&[7, 8])?;
a_builder.append(true)?;
a_builder.values().append_slice(&[9, 10])?;
a_builder.append(true)?;
a_builder.values().append_slice(&[11, 12])?;
a_builder.append(false)?;
a_builder.values().append_slice(&[13, 14])?;
a_builder.append(true)?;
a_builder.values().append_null()?;
a_builder.values().append_null()?;
a_builder.append(true)?;
let a = a_builder.finish();
// append array
builder.append_data(&[
a.data(),
a.slice(1, 3).data(),
a.slice(2, 1).data(),
a.slice(5, 0).data(),
])?;
let finished = builder.finish();
let expected_int_array = UInt16Array::from(vec![
Some(1),
Some(2),
Some(3),
Some(4),
Some(5),
Some(6),
// append first array
Some(7),
Some(8),
Some(9),
Some(10),
Some(11),
Some(12),
Some(13),
Some(14),
None,
None,
// append slice(1, 3)
Some(9),
Some(10),
Some(11),
Some(12),
Some(13),
Some(14),
// append slice(2, 1)
Some(11),
Some(12),
]);
let expected_list_data = ArrayData::new(
DataType::FixedSizeList(
Box::new(Field::new("item", DataType::UInt16, true)),
2,
),
12,
None,
None,
0,
vec![],
vec![expected_int_array.data()],
);
let expected_list =
FixedSizeListArray::from(Arc::new(expected_list_data) as ArrayData);
assert_eq!(&expected_list.values(), &finished.values());
assert_eq!(expected_list.len(), finished.len());
Ok(())
}
*/
}
| 34.674897 | 126 | 0.527975 |
2260b9d275bb2afc4459ace0e5677becf5ebdb95 | 1,157 | // Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {argh::FromArgs, ffx_core::ffx_command, ffx_emulator_sub_command::Subcommand};
/// Entry point for the `ffx emu` command.
#[ffx_command()]
#[derive(FromArgs, Debug, PartialEq)]
#[argh(
subcommand,
name = "emu",
description = "Start and manage Fuchsia emulators.",
note = "The emu command is used to start up, manage, and shut down Fuchsia emulators.
The `start` subcommand launches an emulator according to the configuration in
the Product Bundle. Once one or more emulators are running, you can use the
`list` subcommand to see the name and status of all running emulators, and the
`show` subcommand to get a printout of the configuration for a specific
emulator. When you're done with an emulator, use the `stop` subcommand to
cleanly terminate that emulator.
For more information on the Fuchsia emulator, see the Getting Started page at
https://fuchsia.dev/fuchsia-src/get-started/set_up_femu."
)]
pub struct EmulatorCommand {
#[argh(subcommand)]
pub subcommand: Subcommand,
}
| 38.566667 | 89 | 0.756266 |
6222f82bd6c633ef599137334cb3012b4a74eb33 | 1,801 | //! # google-cloud-storage
//!
//! Google Cloud Platform Storage Client library.
//!
//! * [About Cloud Storage](https://cloud.google.com/storage/)
//! * [JSON API Documentation](https://cloud.google.com/storage/docs/json_api/v1)
//!
//! ## Quick Start
//!
//! ### Upload and Download Objects
//!
//! ```
//! use google_cloud_storage::client::Client;
//! use google_cloud_storage::http::Error;
//! use google_cloud_storage::sign::SignedURLOptions;
//! use google_cloud_storage::sign::SignedURLMethod;
//! use google_cloud_storage::http::objects::get::GetObjectRequest;
//! use google_cloud_storage::http::objects::upload::UploadObjectRequest;
//! use tokio::task::JoinHandle;
//! use std::fs::File;
//! use std::io::BufReader;
//! use std::io::Read;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Error> {
//!
//! // Create client.
//! let mut client = Client::new().await.unwrap();
//!
//! // Upload the file
//! let uploaded = client.upload_object(&UploadObjectRequest {
//! bucket: "bucket".to_string(),
//! name: "file.png".to_string(),
//! ..Default::default()
//! }, "hello world".as_bytes(), "application/octet-stream", None).await;
//!
//! // Download the file
//! let data = client.download_object(&GetObjectRequest {
//! bucket: "bucket".to_string(),
//! object: "file.png".to_string(),
//! ..Default::default()
//! }, None).await;
//!
//! // Create signed url.
//! let url_for_download = client.signed_url("bucket", "foo.txt", SignedURLOptions::default());
//! let url_for_upload = client.signed_url("bucket", "foo.txt", SignedURLOptions {
//! method: SignedURLMethod::PUT,
//! ..Default::default()
//! });
//! Ok(())
//! }
//! ```
pub mod client;
pub mod http;
pub mod sign;
| 31.596491 | 99 | 0.607996 |
5de38a6fc2b9ac4eb1d0a3fd282f5c8a63e851ce | 4,634 | use std::collections::HashMap;
use crate::util::unique;
use crate::BaseCustom;
use std::ops::Range;
use std::fmt;
impl BaseCustom<char> {
/// `new` creates a new BaseCustom instance and propagates the values for converting
/// numeric bases.
///
/// `new` for `BaseCustom<char>` takes a `Vec<char>` as its parameter; each unit
/// of the custom numeric base is a single character.
pub fn new(chars: Vec<char>) -> BaseCustom<char> {
if chars.iter().count() < 2 { panic!("Too few numeric units! Provide two or more.") }
if chars.iter().count() > 255 { panic!("Too many numeric units!") }
let chars = unique(chars);
let mut mapped = HashMap::with_capacity(chars.iter().count());
for (i,c) in chars.iter().enumerate() {
mapped.insert(c.clone(), i as u8);
}
BaseCustom::<char> {
primitives: chars.clone(),
primitives_hash: mapped,
base: chars.iter().count() as u64,
delim: None,
}
}
/// `gen` returns a String computed from the character mapping and the
/// positional values the given u64 parameter evaluates to in your
/// custom base
///
/// # Example
/// ```
/// use base_custom::BaseCustom;
///
/// let base2 = BaseCustom::<char>::new(vec!['0','1']);
/// assert_eq!(base2.gen(3), "11");
/// ```
///
/// # Output
/// ```text
/// "11"
/// ```
pub fn gen(&self, input_val: u64) -> String {
if input_val == 0 {
return self.primitives[0].to_string();
}
let mut number = input_val;
let mut result = String::new();
loop {
if number == 0 { break };
result.insert(0, self.primitives[(number % self.base) as usize]);
number = number/self.base;
};
format!("{}", result)
}
/// `char` returns a char straight from the character mapping.
/// The decimal value must be within the character range for a `Some` result.
///
/// # Example
/// ```
/// use base_custom::BaseCustom;
///
/// let base10 = BaseCustom::<char>::new("0123456789".chars().collect());
/// assert_eq!(base10.char(9), Some('9'));
/// ```
///
/// # Output
/// ```text
/// '9'
/// ```
pub fn char(&self, input_val: usize) -> Option<char> {
if input_val >= self.primitives.len() { return None }
Some(self.primitives[input_val])
}
/// `decimal` returns a u64 value computed from the units that form
/// the custom base.
///
/// # Example
/// ```
/// use base_custom::BaseCustom;
///
/// let base2 = BaseCustom::<char>::new(vec!['0','1']);
/// assert_eq!(base2.decimal("00011"), 3);
/// ```
///
/// # Output
/// ```text
/// 3
/// ```
pub fn decimal<S>(&self, input_val: S) -> u64
where S: Into<String> {
let input_val = input_val.into();
input_val.chars().rev().enumerate().fold(0, |sum, (i, chr)|
sum + (self.primitives_hash[&chr] as u64) * self.base.pow(i as u32)
)
}
/// Returns the zero value of your custom base
pub fn zero(&self) -> &char {
&self.primitives[0]
}
/// Returns the one value of your custom base
pub fn one(&self) -> &char {
&self.primitives[1]
}
/// Returns the nth value of your custom base
///
/// Like most indexing operations, the count starts from zero, so nth(0) returns the first value,
/// nth(1) the second, and so on.
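///
/// # Example
/// ```
/// use base_custom::BaseCustom;
///
/// let base16 = BaseCustom::<char>::new("0123456789abcdef".chars().collect());
/// assert_eq!(base16.nth(10), Some(&'a'));
/// assert_eq!(base16.nth(16), None);
/// ```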
pub fn nth(&self, pos: usize) -> Option<&char> {
if pos < self.base as usize {
Some(&self.primitives[pos])
} else {
None
}
}
/// Create a custom numeric base from an ascii range of ordinal values
///
/// This method currently restricts the ascii character range of the
/// 95 typical characters starting from 32 and ending with 127. If you'd
/// like to use characters outside of this range please use the `new` method.
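///
/// # Example
/// ```
/// use base_custom::BaseCustom;
///
/// // ordinals 48..58 are the ASCII digits '0' through '9'
/// let base10 = BaseCustom::<char>::from_ordinal_range(48..58);
/// assert_eq!(base10.gen(255), "255");
/// ```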
pub fn from_ordinal_range(range: Range<u32>) -> BaseCustom<char> {
let min = std::cmp::max(32, range.start);
let max = std::cmp::min(127, range.end);
let mut chars: Vec<char> = Vec::with_capacity(std::cmp::min(range.len(), 95));
for chr in min..max {
chars.push(std::char::from_u32(chr).unwrap());
}
BaseCustom::<char>::new(chars)
}
}
impl PartialEq for BaseCustom<char> {
fn eq(&self, other: &BaseCustom<char>) -> bool {
self.primitives == other.primitives &&
self.base == other.base &&
self.delim == other.delim
}
}
impl fmt::Debug for BaseCustom<char> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f,
"BaseCustom\n\tprimitives: {:?}\n\tprimitives_hash: {:?}\n\tbase: {}\n\tdelim: {:?}",
self.primitives, self.primitives_hash, self.base, self.delim
)
}
}
| 28.782609 | 99 | 0.597972 |
e4e46c78dde8322196ec03f1e1084090e80224e0 | 1,386 | use anyhow::Context;
use dirs_next::home_dir;
use serde::{Deserialize, Serialize};
use std::fs;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct ConfigFile {
pub recursive: Option<bool>,
}
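/// Loads the config from `~/.glass/glass.json`, creating the file with defaults
/// if it does not exist.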
pub fn parse() -> anyhow::Result<ConfigFile> {
let dir = folder_path();
fs::create_dir_all(dir)?;
let file = file_path();
let does_exist = Path::new(file.as_path()).exists();
if does_exist {
read(file)
} else {
let config = ConfigFile {
recursive: Some(true),
};
write(config)
}
}
fn folder_path() -> PathBuf {
let mut dir = PathBuf::new();
dir.push(home_dir().unwrap());
dir.push(".glass");
dir
}
fn file_path() -> PathBuf {
let mut file = folder_path();
file.push("glass.json");
file
}
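/// Serializes `config` to `~/.glass/glass.json`, creating the `.glass` directory if needed.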
pub fn write(config: ConfigFile) -> anyhow::Result<ConfigFile> {
let dir = folder_path();
fs::create_dir_all(dir)?;
let file = file_path();
let json = serde_json::to_string(&config).unwrap();
let mut physical_file = fs::File::create(file.as_path())?;
physical_file.write_all(json.as_bytes())?;
Ok(config)
}
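/// Reads and deserializes the config file at `path`.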
pub fn read(path: PathBuf) -> anyhow::Result<ConfigFile> {
let file = fs::File::open(path.as_path())?;
match serde_json::from_reader(file) {
Ok(json) => Ok(json),
Err(err) => Err(anyhow::Error::new(err))
.with_context(|| "Error parsing config file. Try deleting the file."),
}
}
| 23.896552 | 73 | 0.670996 |
4a07358035310f3a7110b0893d5442e1deef52f1 | 626 | mod global;
mod raii;
use log::info;
pub use raii::RaiiFrameAllocator;
use spin::Mutex;
use x86_64::structures::paging::FrameAllocator;
use self::global::{GlobalFrameAllocator, FRAME_ALLOCATOR};
use crate::BOOT_INFO;
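/// Initializes the global frame allocator from the bootloader-supplied
/// boot info, then allocates a single frame as a sanity check.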
pub fn init() {
let boot_info = BOOT_INFO.get().unwrap();
FRAME_ALLOCATOR.call_once(|| Mutex::new(GlobalFrameAllocator::new(boot_info)));
{
let mut allocator = FRAME_ALLOCATOR.get().unwrap().lock();
let _test_frame = allocator
.allocate_frame()
.expect("failed to allocate test frame");
}
info!("initialized frame allocator from boot info");
}
| 25.04 | 83 | 0.678914 |