// File: 034a323c987dfe2361aecda5f0c2c205ad651674
impl Solution {
pub fn dominant_index(nums: Vec<i32>) -> i32 {
if nums.len() == 1 {
return 0;
}
        // Track the largest value in `a`, the runner-up in `b`, and the
        // index of the largest in `ans`.
        let mut a = nums[0];
        let mut b = nums[1];
        let mut ans = 0;
        if a < b {
            std::mem::swap(&mut a, &mut b);
            ans = 1;
        }
for i in 2..nums.len() {
if a < nums[i] {
b = a;
a = nums[i];
ans = i;
} else if b < nums[i] {
b = nums[i];
}
}
        // The largest element dominates only if it is at least double the
        // runner-up.
        if a >= 2 * b {
            return ans as i32;
        }
        -1
}
}
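// A minimal sketch of exercising the solution above; LeetCode normally
// supplies `struct Solution;`, which is assumed to be declared alongside
// this impl.
#[cfg(test)]
mod tests {
    #[test]
    fn picks_index_of_dominant_element() {
        // 6 is at least twice every other element, so its index wins.
        assert_eq!(super::Solution::dominant_index(vec![3, 6, 1, 0]), 1);
        // 4 is less than twice 3, so no element dominates.
        assert_eq!(super::Solution::dominant_index(vec![1, 2, 3, 4]), -1);
    }
}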

// File: 11dfd15ae8b198bda4f479db1fc50e8d0e96ad6f
use crate::*;
use std::convert::TryInto;
pub type LockupIndex = u32;
#[derive(Serialize, Deserialize)]
#[serde(crate = "near_sdk::serde")]
#[cfg_attr(not(target_arch = "wasm32"), derive(Debug, PartialEq))]
pub struct LockupClaim {
pub index: LockupIndex,
pub unclaimed_balance: WrappedBalance,
pub is_final: bool,
}
#[derive(BorshDeserialize, BorshSerialize, Deserialize)]
#[serde(crate = "near_sdk::serde")]
#[cfg_attr(
not(target_arch = "wasm32"),
derive(Debug, PartialEq, Clone, Serialize)
)]
pub struct Lockup {
pub account_id: ValidAccountId,
pub schedule: Schedule,
#[serde(default)]
#[serde(with = "u128_dec_format")]
pub claimed_balance: Balance,
/// An optional configuration that allows vesting/lockup termination.
pub termination_config: Option<TerminationConfig>,
}
impl Lockup {
pub fn new_unlocked(account_id: AccountId, total_balance: Balance) -> Self {
Self {
account_id: account_id.try_into().unwrap(),
schedule: Schedule::new_unlocked(total_balance),
claimed_balance: 0,
termination_config: None,
}
}
pub fn claim(&mut self, index: LockupIndex) -> LockupClaim {
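        // Pay out everything the schedule has unlocked so far, minus what
        // was already claimed, and catch `claimed_balance` up to match.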
let unlocked_balance = self.schedule.unlocked_balance(current_timestamp_sec());
assert!(unlocked_balance >= self.claimed_balance, "Invariant");
let unclaimed_balance = unlocked_balance - self.claimed_balance;
self.claimed_balance = unlocked_balance;
LockupClaim {
index,
unclaimed_balance: unclaimed_balance.into(),
is_final: unlocked_balance == self.schedule.total_balance(),
}
}
pub fn assert_new_valid(&self, total_balance: Balance) {
assert_eq!(
self.claimed_balance, 0,
"The initial lockup claimed balance should be 0"
);
self.schedule.assert_valid(total_balance);
if let Some(termination_config) = &self.termination_config {
match &termination_config.vesting_schedule {
None => {
// Ok, using lockup schedule.
}
Some(HashOrSchedule::Hash(_hash)) => {
// Ok, using unknown hash. Can't verify.
}
Some(HashOrSchedule::Schedule(schedule)) => {
schedule.assert_valid(total_balance);
self.schedule.assert_valid_termination_schedule(&schedule);
}
}
}
}
}

// File: 26990017fb0c9985914cda3e14b0b402bed8a635
use crate::jcli_lib::certificate::{write_cert, Error};
use chain_impl_mockchain::certificate::{self, Certificate, VotePlanId};
use std::path::PathBuf;
use structopt::StructOpt;
/// create an encrypted vote tally certificate
///
/// voteplan id needs to be provided
#[derive(StructOpt)]
#[structopt(rename_all = "kebab-case")]
pub struct EncryptedVoteTally {
/// vote plan id
///
/// the vote plan identifier on the blockchain
#[structopt(long = "vote-plan-id")]
pub id: VotePlanId,
/// write the output to the given file or print it to the standard output if not defined
#[structopt(long = "output")]
pub output: Option<PathBuf>,
}
impl EncryptedVoteTally {
pub fn exec(self) -> Result<(), Error> {
let vote_tally = certificate::EncryptedVoteTally::new(self.id);
let cert = Certificate::EncryptedVoteTally(vote_tally);
write_cert(self.output.as_deref(), cert.into())
}
}
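// A minimal sketch of driving this subcommand without the CLI (the id value
// is hypothetical; structopt normally fills the fields from argv):
//
// let cmd = EncryptedVoteTally { id: vote_plan_id, output: None };
// cmd.exec()?;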

// File: 6af45ca435f9c6e8ab2d698d777b10dfcfec6eb1
#[doc = "Reader of register PEEK16"]
pub type R = crate::R<u32, super::PEEK16>;
#[doc = "Reader of field `STAT`"]
pub type STAT_R = crate::R<bool, bool>;
impl R {
#[doc = "Bit 0 - STAT"]
#[inline(always)]
pub fn stat(&self) -> STAT_R {
STAT_R::new((self.bits & 0x01) != 0)
}
}
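// A minimal sketch of consuming this reader (the peripheral handle owning
// PEEK16 is assumed, not defined in this file):
//
// if periph.peek16.read().stat().bit_is_set() { /* ... */ }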

// File: 69b4b985c67c1b8844febd0a7720696841a8781b
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::vec::Vec;
pub fn run<P, R, W>(code_path: P, input: R, output: &mut W)
where P: AsRef<Path>, R: Read, W: Write
{
let code_file = File::open(code_path).unwrap();
let code: Vec<char> = code_file.bytes()
.map(|byte_result| byte_result.unwrap() as char)
.collect();
let mut code_pointer = 0;
let mut input = input.bytes();
let mut tape: Vec<u8> = vec![0];
let mut tape_pointer = 0;
while code_pointer < code.len() {
match code[code_pointer] {
'>' => {
tape_pointer += 1;
if tape_pointer >= tape.len() {
tape.push(0)
}
}
'<' => tape_pointer -= 1,
'+' => tape[tape_pointer] = tape[tape_pointer].wrapping_add(1),
'-' => tape[tape_pointer] = tape[tape_pointer].wrapping_sub(1),
'.' => {
output.write_all(&[tape[tape_pointer]]).unwrap();
output.flush().unwrap()
},
',' => tape[tape_pointer] = match input.next() {
Some(x) => x.unwrap(),
None => 0,
},
'[' => {
if tape[tape_pointer] == 0 {
let mut count = 1;
while count > 0 {
code_pointer += 1;
if code[code_pointer] == '[' {
count += 1
} else if code[code_pointer] == ']' {
count -= 1
}
}
}
}
']' => {
if tape[tape_pointer] != 0 {
let mut count = 1;
while count > 0 {
code_pointer -= 1;
if code[code_pointer] == '[' {
count -= 1
} else if code[code_pointer] == ']' {
count += 1
}
}
}
}
_ => (),
}
code_pointer += 1;
}
}
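// A minimal sketch of driving the interpreter above; the temp-file path and
// the tiny program are hypothetical, and only std is assumed.
#[cfg(test)]
mod tests {
    use std::io::Cursor;

    #[test]
    fn increments_a_cell_and_prints_it() {
        // `+++.` increments cell 0 three times, then writes it to output.
        let path = std::env::temp_dir().join("bf_smoke_test.bf");
        std::fs::write(&path, "+++.").unwrap();
        let mut output = Vec::new();
        super::run(&path, Cursor::new(Vec::new()), &mut output);
        assert_eq!(output, [3]);
    }
}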

// File: db6377f92a11d997869dafc69141ced97a6892f8
pub mod rvm;
use self::rvm::RvmGenerator;
pub enum Cmd {
Help,
Rvm(RvmGenerator),
Invalid,
}
pub struct Config {
kind: String,
params: Vec<String>,
}
impl Config {
pub fn new(args: &[String]) -> Option<Config> {
if args.len() >= 2 {
let kind = args[1].clone();
let params: Vec<String> = args[2..].to_vec();
Some(Config { kind, params })
} else {
None
}
}
pub fn resolve(&self) -> Cmd {
match self.kind.as_ref() {
"help" => Cmd::Help,
"rvm" => Cmd::Rvm(RvmGenerator::new(&self.params)),
_ => Cmd::Invalid,
}
}
}
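// A minimal sketch exercising the parsing above; the argv contents are
// hypothetical. The child test module can read the private fields.
#[cfg(test)]
mod tests {
    use super::Config;

    #[test]
    fn splits_kind_and_params() {
        let args: Vec<String> = ["genpass", "rvm", "16"]
            .iter()
            .map(|s| s.to_string())
            .collect();
        let config = Config::new(&args).expect("at least two args");
        assert_eq!(config.kind, "rvm");
        assert_eq!(config.params, vec!["16".to_string()]);
        // Fewer than two args yields no config at all.
        assert!(Config::new(&args[..1]).is_none());
    }
}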

// File: 48ab6efd8da4e1134b31acc5970212e307f03cf1
use gleam_core::{
error::{Error, FileIoAction, FileKind},
io::{
CommandExecutor, DirEntry, FileSystemIO, FileSystemWriter, OutputFile, ReadDir,
WrappedReader, WrappedWriter,
},
Result,
};
use lazy_static::lazy_static;
use std::{
ffi::OsStr,
fmt::Debug,
fs::File,
io::{self, BufRead, BufReader, Write},
path::{Path, PathBuf},
process::Stdio,
};
/// A `FileWriter` implementation that writes to the file system.
#[derive(Debug, Clone, Copy)]
pub struct ProjectIO;
impl ProjectIO {
pub fn new() -> Self {
Self
}
pub fn boxed() -> Box<Self> {
Box::new(Self::new())
}
}
impl gleam_core::io::FileSystemReader for ProjectIO {
fn gleam_source_files(&self, dir: &Path) -> Box<dyn Iterator<Item = PathBuf>> {
Box::new({
let dir = dir.to_path_buf();
walkdir::WalkDir::new(dir.clone())
.follow_links(true)
.into_iter()
.filter_map(Result::ok)
.filter(|e| e.file_type().is_file())
.map(|d| d.into_path())
.filter(move |d| is_gleam_path(d, dir.clone()))
})
}
fn gleam_metadata_files(&self, dir: &Path) -> Box<dyn Iterator<Item = PathBuf>> {
Box::new({
let dir = dir.to_path_buf();
walkdir::WalkDir::new(dir)
.follow_links(true)
.into_iter()
.filter_map(Result::ok)
.filter(|e| e.file_type().is_file())
.map(|d| d.into_path())
.filter(|p| p.extension().and_then(OsStr::to_str) == Some("gleam_module"))
})
}
fn read(&self, path: &Path) -> Result<String, Error> {
read(path)
}
fn is_file(&self, path: &Path) -> bool {
path.is_file()
}
fn is_directory(&self, path: &Path) -> bool {
path.is_dir()
}
fn reader(&self, path: &Path) -> Result<WrappedReader, Error> {
reader(path)
}
fn read_dir(&self, path: &Path) -> Result<ReadDir> {
read_dir(path).map(|entries| {
entries
.map(|result| result.map(|entry| DirEntry::from_path(entry.path())))
.collect()
})
}
}
impl FileSystemWriter for ProjectIO {
fn writer(&self, path: &Path) -> Result<WrappedWriter, Error> {
writer(path)
}
fn delete(&self, path: &Path) -> Result<()> {
delete_dir(path)
}
fn copy(&self, from: &Path, to: &Path) -> Result<()> {
copy(from, to)
}
fn copy_dir(&self, from: &Path, to: &Path) -> Result<()> {
copy_dir(from, to)
}
fn mkdir(&self, path: &Path) -> Result<(), Error> {
mkdir(path)
}
fn hardlink(&self, from: &Path, to: &Path) -> Result<(), Error> {
hardlink(from, to)
}
fn symlink_dir(&self, from: &Path, to: &Path) -> Result<(), Error> {
symlink_dir(from, to)
}
fn delete_file(&self, path: &Path) -> Result<()> {
delete_file(path)
}
}
impl CommandExecutor for ProjectIO {
fn exec(
&self,
program: &str,
args: &[String],
env: &[(&str, String)],
cwd: Option<&Path>,
quiet: bool,
) -> Result<i32, Error> {
tracing::debug!(program=program, args=?args.join(" "), env=?env, cwd=?cwd, "command_exec");
let stdout = if quiet {
Stdio::null()
} else {
Stdio::inherit()
};
let result = std::process::Command::new(program)
.args(args)
.stdin(Stdio::null())
.stdout(stdout)
.envs(env.iter().map(|(a, b)| (a, b)))
.current_dir(cwd.unwrap_or_else(|| Path::new("./")))
.status();
match result {
Ok(status) => Ok(status.code().unwrap_or_default()),
Err(error) => Err(match error.kind() {
io::ErrorKind::NotFound => Error::ShellProgramNotFound {
program: program.to_string(),
},
other => Error::ShellCommand {
program: program.to_string(),
err: Some(other),
},
}),
}
}
}
impl FileSystemIO for ProjectIO {}
pub fn delete_dir(dir: &Path) -> Result<(), Error> {
tracing::debug!(path=?dir, "deleting_directory");
if dir.exists() {
std::fs::remove_dir_all(&dir).map_err(|e| Error::FileIo {
action: FileIoAction::Delete,
kind: FileKind::Directory,
path: dir.to_path_buf(),
err: Some(e.to_string()),
})?;
} else {
tracing::debug!(path=?dir, "directory_did_not_exist_for_deletion");
}
Ok(())
}
pub fn delete_file(file: &Path) -> Result<(), Error> {
tracing::debug!("Deleting file {:?}", file);
if file.exists() {
std::fs::remove_file(&file).map_err(|e| Error::FileIo {
action: FileIoAction::Delete,
kind: FileKind::File,
path: file.to_path_buf(),
err: Some(e.to_string()),
})?;
} else {
tracing::debug!("Did not exist for deletion: {:?}", file);
}
Ok(())
}
pub fn write_outputs_under(outputs: &[OutputFile], base: &Path) -> Result<(), Error> {
for file in outputs {
write_output_under(file, base)?;
}
Ok(())
}
pub fn write_outputs(outputs: &[OutputFile]) -> Result<(), Error> {
for file in outputs {
write_output(file)?;
}
Ok(())
}
pub fn write_output_under(file: &OutputFile, base: &Path) -> Result<(), Error> {
let OutputFile { path, text } = file;
write(&base.join(path), text)
}
pub fn write_output(file: &OutputFile) -> Result<(), Error> {
let OutputFile { path, text } = file;
write(path, text)
}
pub fn write(path: &Path, text: &str) -> Result<(), Error> {
write_bytes(path, text.as_bytes())
}
#[cfg(target_family = "unix")]
pub fn make_executable(path: impl AsRef<Path>) -> Result<(), Error> {
use std::os::unix::fs::PermissionsExt;
tracing::debug!(path = ?path.as_ref(), "setting_permissions");
std::fs::set_permissions(path.as_ref(), std::fs::Permissions::from_mode(0o755)).map_err(
|e| Error::FileIo {
action: FileIoAction::UpdatePermissions,
kind: FileKind::File,
path: path.as_ref().to_path_buf(),
err: Some(e.to_string()),
},
)?;
Ok(())
}
#[cfg(not(target_family = "unix"))]
pub fn make_executable(_path: impl AsRef<Path>) -> Result<(), Error> {
Ok(())
}
pub fn writer(path: &Path) -> Result<WrappedWriter, Error> {
tracing::debug!(path = ?path, "opening_file_writer");
let dir_path = path.parent().ok_or_else(|| Error::FileIo {
action: FileIoAction::FindParent,
kind: FileKind::Directory,
path: path.to_path_buf(),
err: None,
})?;
std::fs::create_dir_all(dir_path).map_err(|e| Error::FileIo {
action: FileIoAction::Create,
kind: FileKind::Directory,
path: dir_path.to_path_buf(),
err: Some(e.to_string()),
})?;
let file = File::create(&path).map_err(|e| Error::FileIo {
action: FileIoAction::Create,
kind: FileKind::File,
path: path.to_path_buf(),
err: Some(e.to_string()),
})?;
Ok(WrappedWriter::new(path, Box::new(file)))
}
pub fn write_bytes(path: &Path, bytes: &[u8]) -> Result<(), Error> {
tracing::debug!(path=?path, "writing_file");
let dir_path = path.parent().ok_or_else(|| Error::FileIo {
action: FileIoAction::FindParent,
kind: FileKind::Directory,
path: path.to_path_buf(),
err: None,
})?;
std::fs::create_dir_all(dir_path).map_err(|e| Error::FileIo {
action: FileIoAction::Create,
kind: FileKind::Directory,
path: dir_path.to_path_buf(),
err: Some(e.to_string()),
})?;
let mut f = File::create(&path).map_err(|e| Error::FileIo {
action: FileIoAction::Create,
kind: FileKind::File,
path: path.to_path_buf(),
err: Some(e.to_string()),
})?;
f.write_all(bytes).map_err(|e| Error::FileIo {
action: FileIoAction::WriteTo,
kind: FileKind::File,
path: path.to_path_buf(),
err: Some(e.to_string()),
})?;
Ok(())
}
fn is_gleam_path(path: &Path, dir: impl AsRef<Path>) -> bool {
use regex::Regex;
lazy_static! {
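        // Matches relative paths made of lowercase module segments ending in
        // `.gleam`, e.g. `one_two/a.gleam`, with either slash style.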
static ref RE: Regex = Regex::new(&format!(
"^({module}{slash})*{module}\\.gleam$",
module = "[a-z][_a-z0-9]*",
slash = "(/|\\\\)",
))
.expect("is_gleam_path() RE regex");
}
RE.is_match(
path.strip_prefix(dir)
.expect("is_gleam_path(): strip_prefix")
.to_str()
.expect("is_gleam_path(): to_str"),
)
}
#[test]
fn is_gleam_path_test() {
assert!(is_gleam_path(
Path::new("/some-prefix/a.gleam"),
Path::new("/some-prefix/")
));
assert!(is_gleam_path(
Path::new("/some-prefix/one_two/a.gleam"),
Path::new("/some-prefix/")
));
assert!(is_gleam_path(
Path::new("/some-prefix/one_two/a123.gleam"),
Path::new("/some-prefix/")
));
assert!(is_gleam_path(
Path::new("/some-prefix/one_2/a123.gleam"),
Path::new("/some-prefix/")
));
}
pub fn gleam_files(dir: &Path) -> impl Iterator<Item = PathBuf> + '_ {
walkdir::WalkDir::new(dir)
.follow_links(true)
.into_iter()
.filter_map(Result::ok)
.filter(|e| e.file_type().is_file())
.map(|d| d.into_path())
.filter(move |d| is_gleam_path(d, dir))
}
pub fn gleam_files_excluding_gitignore(dir: &Path) -> impl Iterator<Item = PathBuf> + '_ {
ignore::WalkBuilder::new(dir)
.follow_links(true)
.require_git(false)
.build()
.into_iter()
.filter_map(Result::ok)
.filter(|e| e.file_type().map(|t| t.is_file()).unwrap_or(false))
.map(ignore::DirEntry::into_path)
.filter(move |d| is_gleam_path(d, dir))
}
pub fn native_files(dir: &Path) -> Result<impl Iterator<Item = PathBuf> + '_> {
Ok(read_dir(dir)?
.flat_map(Result::ok)
.map(|e| e.path())
.filter(|path| {
let extension = path
.extension()
.unwrap_or_default()
.to_str()
.unwrap_or_default();
extension == "erl" || extension == "hrl" || extension == "js" || extension == "mjs"
}))
}
pub fn erlang_files(dir: &Path) -> Result<impl Iterator<Item = PathBuf> + '_> {
Ok(read_dir(dir)?
.flat_map(Result::ok)
.map(|e| e.path())
.filter(|path| {
let extension = path
.extension()
.unwrap_or_default()
.to_str()
.unwrap_or_default();
extension == "erl" || extension == "hrl"
}))
}
pub fn create_tar_archive(outputs: Vec<OutputFile>) -> Result<Vec<u8>, Error> {
tracing::debug!("creating_tar_archive");
let encoder = flate2::write::GzEncoder::new(vec![], flate2::Compression::default());
let mut builder = tar::Builder::new(encoder);
for file in outputs {
let mut header = tar::Header::new_gnu();
header.set_path(&file.path).map_err(|e| Error::AddTar {
path: file.path.clone(),
err: e.to_string(),
})?;
header.set_size(file.text.as_bytes().len() as u64);
header.set_cksum();
builder
.append(&header, file.text.as_bytes())
.map_err(|e| Error::AddTar {
path: file.path.clone(),
err: e.to_string(),
})?;
}
builder
.into_inner()
.map_err(|e| Error::TarFinish(e.to_string()))?
.finish()
.map_err(|e| Error::Gzip(e.to_string()))
}
pub fn mkdir(path: impl AsRef<Path> + Debug) -> Result<(), Error> {
tracing::debug!(path=?path, "creating_directory");
std::fs::create_dir_all(&path).map_err(|err| Error::FileIo {
kind: FileKind::Directory,
path: PathBuf::from(path.as_ref()),
action: FileIoAction::Create,
err: Some(err.to_string()),
})
}
pub fn read_dir(path: impl AsRef<Path> + Debug) -> Result<std::fs::ReadDir, Error> {
tracing::debug!(path=?path,"reading_directory");
std::fs::read_dir(&path).map_err(|e| Error::FileIo {
action: FileIoAction::Read,
kind: FileKind::Directory,
path: PathBuf::from(path.as_ref()),
err: Some(e.to_string()),
})
}
pub fn gleam_modules_metadata_paths(
path: impl AsRef<Path> + Debug,
) -> Result<impl Iterator<Item = PathBuf>, Error> {
Ok(read_dir(path)?
.into_iter()
.filter_map(Result::ok)
.map(|f| f.path())
.filter(|p| p.extension().and_then(OsStr::to_str) == Some("gleam_module")))
}
pub fn read(path: impl AsRef<Path> + Debug) -> Result<String, Error> {
tracing::debug!(path=?path,"reading_file");
std::fs::read_to_string(&path).map_err(|err| Error::FileIo {
action: FileIoAction::Read,
kind: FileKind::File,
path: PathBuf::from(path.as_ref()),
err: Some(err.to_string()),
})
}
pub fn reader(path: impl AsRef<Path> + Debug) -> Result<WrappedReader, Error> {
tracing::debug!(path=?path,"opening_file_reader");
let reader = File::open(&path).map_err(|err| Error::FileIo {
action: FileIoAction::Open,
kind: FileKind::File,
path: PathBuf::from(path.as_ref()),
err: Some(err.to_string()),
})?;
Ok(WrappedReader::new(path.as_ref(), Box::new(reader)))
}
pub fn buffered_reader<P: AsRef<Path> + Debug>(path: P) -> Result<impl BufRead, Error> {
tracing::debug!(path=?path,"opening_file_buffered_reader");
let reader = File::open(&path).map_err(|err| Error::FileIo {
action: FileIoAction::Open,
kind: FileKind::File,
path: PathBuf::from(path.as_ref()),
err: Some(err.to_string()),
})?;
Ok(BufReader::new(reader))
}
pub fn copy(path: impl AsRef<Path> + Debug, to: impl AsRef<Path> + Debug) -> Result<(), Error> {
tracing::debug!(from=?path, to=?to, "copying_file");
// TODO: include the destination in the error message
std::fs::copy(&path, &to)
.map_err(|err| Error::FileIo {
action: FileIoAction::Copy,
kind: FileKind::File,
path: PathBuf::from(path.as_ref()),
err: Some(err.to_string()),
})
.map(|_| ())
}
// pub fn rename(path: impl AsRef<Path> + Debug, to: impl AsRef<Path> + Debug) -> Result<(), Error> {
// tracing::debug!(from=?path, to=?to, "renaming_file");
// // TODO: include the destination in the error message
// std::fs::rename(&path, &to)
// .map_err(|err| Error::FileIo {
// action: FileIoAction::Rename,
// kind: FileKind::File,
// path: PathBuf::from(path.as_ref()),
// err: Some(err.to_string()),
// })
// .map(|_| ())
// }
pub fn copy_dir(path: impl AsRef<Path> + Debug, to: impl AsRef<Path> + Debug) -> Result<(), Error> {
tracing::debug!(from=?path, to=?to, "copying_directory");
// TODO: include the destination in the error message
fs_extra::dir::copy(&path, &to, &fs_extra::dir::CopyOptions::new())
.map_err(|err| Error::FileIo {
action: FileIoAction::Copy,
kind: FileKind::Directory,
path: PathBuf::from(path.as_ref()),
err: Some(err.to_string()),
})
.map(|_| ())
}
pub fn symlink_dir(
src: impl AsRef<Path> + Debug,
dest: impl AsRef<Path> + Debug,
) -> Result<(), Error> {
tracing::debug!(src=?src, dest=?dest, "symlinking");
symlink::symlink_dir(&canonicalise(src.as_ref())?, dest.as_ref()).map_err(|err| {
Error::FileIo {
action: FileIoAction::Link,
kind: FileKind::File,
path: PathBuf::from(dest.as_ref()),
err: Some(err.to_string()),
}
})?;
Ok(())
}
pub fn hardlink(from: impl AsRef<Path> + Debug, to: impl AsRef<Path> + Debug) -> Result<(), Error> {
tracing::debug!(from=?from, to=?to, "hardlinking");
std::fs::hard_link(&from, &to)
.map_err(|err| Error::FileIo {
action: FileIoAction::Link,
kind: FileKind::File,
path: PathBuf::from(from.as_ref()),
err: Some(err.to_string()),
})
.map(|_| ())
}
pub fn git_init(path: &Path) -> Result<(), Error> {
tracing::debug!(path=?path, "initializing git");
let args = vec!["init".into(), "--quiet".into(), path.display().to_string()];
match ProjectIO::new().exec("git", &args, &[], None, false) {
Ok(_) => Ok(()),
Err(err) => match err {
Error::ShellProgramNotFound { .. } => Ok(()),
_ => Err(Error::GitInitialization {
error: err.to_string(),
}),
},
}
}
pub fn canonicalise(path: &Path) -> Result<PathBuf, Error> {
std::fs::canonicalize(path).map_err(|err| Error::FileIo {
action: FileIoAction::Canonicalise,
kind: FileKind::File,
path: PathBuf::from(path),
err: Some(err.to_string()),
})
}

// File: e41f1dff9c9ca35aab1b0f8d92951b83fb9d3886
use cassandra_cpp::{stmt, Session, Statement};
use criterion::{criterion_group, criterion_main, Criterion};
use test_helpers::docker_compose::DockerCompose;
use test_helpers::lazy::new_lazy_shared;
#[path = "../tests/helpers/mod.rs"]
mod helpers;
use helpers::ShotoverManager;
struct Query {
name: &'static str,
statement: Statement,
}
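// Each benchmark group below lazily starts a docker-compose stack and a
// shotover instance, then times single insert/select statements through it.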
fn cassandra(c: &mut Criterion) {
let mut group = c.benchmark_group("cassandra");
group.throughput(criterion::Throughput::Elements(1));
group.noise_threshold(0.2);
let queries = [
Query {
name: "insert",
statement: stmt!(
"INSERT INTO benchmark_keyspace.table_1 (id, x, name) VALUES (1, 11, 'foo');"
),
},
Query {
name: "select",
statement: stmt!("SELECT id, x, name FROM benchmark_keyspace.table_1;"),
},
];
// Benches the case where the message does not meet the criteria for encryption
#[cfg(feature = "alpha-transforms")]
{
let resources = new_lazy_shared(|| {
BenchResources::new(
"example-configs/cassandra-protect-local/topology.yaml",
"example-configs/cassandra-protect-local/docker-compose.yml",
)
});
for query in &queries {
group.bench_with_input(
format!("protect_local_{}_unencrypted", query.name),
&resources,
|b, resources| {
b.iter(|| {
let mut resources = resources.borrow_mut();
let connection = &mut resources.as_mut().unwrap().connection;
connection.execute(&query.statement).wait().unwrap();
})
},
);
}
}
{
let resources = new_lazy_shared(|| {
BenchResources::new(
"example-configs/cassandra-redis-cache/topology.yaml",
"example-configs/cassandra-redis-cache/docker-compose.yml",
)
});
// Benches the case where the message does not meet the criteria for caching
for query in &queries {
group.bench_with_input(
format!("redis_cache_{}_uncached", query.name),
&resources,
|b, resources| {
b.iter(|| {
let mut resources = resources.borrow_mut();
let connection = &mut resources.as_mut().unwrap().connection;
connection.execute(&query.statement).wait().unwrap();
})
},
);
}
}
{
let resources = new_lazy_shared(|| {
BenchResources::new(
"example-configs/cassandra-passthrough/topology.yaml",
"example-configs/cassandra-passthrough/docker-compose.yml",
)
});
for query in &queries {
group.bench_with_input(
format!("passthrough_no_parse_{}", query.name),
&resources,
|b, resources| {
b.iter(|| {
let mut resources = resources.borrow_mut();
let connection = &mut resources.as_mut().unwrap().connection;
connection.execute(&query.statement).wait().unwrap();
})
},
);
}
}
#[cfg(feature = "alpha-transforms")]
{
let resources = new_lazy_shared(|| {
BenchResources::new(
"tests/test-configs/cassandra-passthrough-parse-request/topology.yaml",
"tests/test-configs/cassandra-passthrough-parse-request/docker-compose.yml",
)
});
for query in &queries {
group.bench_with_input(
format!("passthrough_parse_request_{}", query.name),
&resources,
|b, resources| {
b.iter(|| {
let mut resources = resources.borrow_mut();
let connection = &mut resources.as_mut().unwrap().connection;
connection.execute(&query.statement).wait().unwrap();
})
},
);
}
}
#[cfg(feature = "alpha-transforms")]
{
let resources = new_lazy_shared(|| {
BenchResources::new(
"tests/test-configs/cassandra-passthrough-parse-response/topology.yaml",
"tests/test-configs/cassandra-passthrough-parse-response/docker-compose.yml",
)
});
for query in &queries {
group.bench_with_input(
format!("passthrough_parse_response_{}", query.name),
&resources,
|b, resources| {
b.iter(|| {
let mut resources = resources.borrow_mut();
let connection = &mut resources.as_mut().unwrap().connection;
connection.execute(&query.statement).wait().unwrap();
})
},
);
}
}
{
let resources = new_lazy_shared(|| {
BenchResources::new_tls(
"example-configs/cassandra-tls/topology.yaml",
"example-configs/cassandra-tls/docker-compose.yml",
)
});
for query in &queries {
group.bench_with_input(format!("tls_{}", query.name), &resources, |b, resources| {
b.iter(|| {
let mut resources = resources.borrow_mut();
let connection = &mut resources.as_mut().unwrap().connection;
connection.execute(&query.statement).wait().unwrap();
})
});
}
}
#[cfg(feature = "alpha-transforms")]
{
let queries = [
Query {
name: "insert",
statement: stmt!("INSERT INTO test_protect_keyspace.test_table (pk, cluster, col1, col2, col3) VALUES ('pk1', 'cluster', 'I am gonna get encrypted!!', 42, true);"),
},
Query {
name: "select",
statement: stmt!("SELECT pk, cluster, col1, col2, col3 FROM test_protect_keyspace.test_table"),
},
];
let resources = new_lazy_shared(|| {
let resources = BenchResources::new(
"example-configs/cassandra-protect-local/topology.yaml",
"example-configs/cassandra-protect-local/docker-compose.yml",
);
resources
.connection
.execute(&stmt!(
"CREATE KEYSPACE test_protect_keyspace WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };"
))
.wait()
.unwrap();
resources
.connection
.execute(&stmt!(
"CREATE TABLE test_protect_keyspace.test_table (pk varchar PRIMARY KEY, cluster varchar, col1 varchar, col2 int, col3 boolean);"
))
.wait()
.unwrap();
resources
.connection
.execute(&stmt!(
"INSERT INTO test_protect_keyspace.test_table (pk, cluster, col1, col2, col3) VALUES ('pk1', 'cluster', 'Initial value', 42, true);"
))
.wait()
.unwrap();
resources
});
for query in queries {
// Benches the case where the message meets the criteria for encryption
group.bench_with_input(
format!("protect_local_{}_encrypted", query.name),
&resources,
|b, resources| {
b.iter(|| {
let mut resources = resources.borrow_mut();
let connection = &mut resources.as_mut().unwrap().connection;
connection.execute(&query.statement).wait().unwrap();
})
},
);
}
}
{
let resources = new_lazy_shared(|| {
BenchResources::new(
"example-configs/cassandra-request-throttling/topology.yaml",
"example-configs/cassandra-request-throttling/docker-compose.yml",
)
});
for query in &queries {
group.bench_with_input(
format!("request_throttling_{}", query.name),
&resources,
|b, resources| {
b.iter(|| {
let mut resources = resources.borrow_mut();
let connection = &mut resources.as_mut().unwrap().connection;
connection.execute(&query.statement).wait().unwrap();
})
},
);
}
}
}
criterion_group!(benches, cassandra);
criterion_main!(benches);
pub struct BenchResources {
_compose: DockerCompose,
_shotover_manager: ShotoverManager,
connection: Session,
}
impl BenchResources {
fn new(shotover_topology: &str, compose_file: &str) -> Self {
let compose = DockerCompose::new(compose_file);
let shotover_manager = ShotoverManager::from_topology_file(shotover_topology);
let connection = shotover_manager.cassandra_connection("127.0.0.1", 9042);
let bench_resources = Self {
_compose: compose,
_shotover_manager: shotover_manager,
connection,
};
bench_resources.setup();
bench_resources
}
fn new_tls(shotover_topology: &str, compose_file: &str) -> Self {
let compose = DockerCompose::new(compose_file);
let shotover_manager = ShotoverManager::from_topology_file(shotover_topology);
let ca_cert = "example-configs/cassandra-tls/certs/localhost_CA.crt";
let connection = shotover_manager.cassandra_connection_tls("127.0.0.1", 9042, ca_cert);
let bench_resources = Self {
_compose: compose,
_shotover_manager: shotover_manager,
connection,
};
bench_resources.setup();
bench_resources
}
fn setup(&self) {
self.connection
.execute(&stmt!(
"CREATE KEYSPACE benchmark_keyspace WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };"
))
.wait().unwrap();
self.connection
.execute(&stmt!(
"CREATE TABLE benchmark_keyspace.table_1 (id int PRIMARY KEY, x int, name varchar);"
))
.wait()
.unwrap();
self.connection
.execute(&stmt!(
"INSERT INTO benchmark_keyspace.table_1 (id, x, name) VALUES (0, 10, 'initial value');"
))
.wait()
.unwrap();
}
}

// File: 4b42a06ccc89a53a8d62fc3680ea14a3e153953b
use std::env;
use std::fs;
use std::path::PathBuf;
use std::process::Command;
use witx_bindgen_gen_core::witx2::abi::Direction;
use witx_bindgen_gen_core::Generator;
#[test]
fn run() {
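    // Generate Python bindings for the host (import) and wasm (export)
    // interfaces, type-check the test driver with mypy, then run it against
    // each test wasm module.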
let mut dir = PathBuf::from(env!("OUT_DIR"));
dir.push("run");
drop(fs::remove_dir_all(&dir));
fs::create_dir_all(&dir).unwrap();
fs::create_dir_all(&dir.join("imports")).unwrap();
fs::create_dir_all(&dir.join("exports")).unwrap();
println!("OUT_DIR = {:?}", dir);
println!("Generating bindings...");
let iface =
witx_bindgen_gen_core::witx2::Interface::parse_file("../../tests/host.witx").unwrap();
let mut files = Default::default();
witx_bindgen_gen_wasmtime_py::Opts::default()
.build()
.generate(&iface, Direction::Import, &mut files);
for (file, contents) in files.iter() {
fs::write(dir.join("imports").join(file), contents).unwrap();
}
fs::write(dir.join("imports").join("__init__.py"), "").unwrap();
let iface =
witx_bindgen_gen_core::witx2::Interface::parse_file("../../tests/wasm.witx").unwrap();
let mut files = Default::default();
witx_bindgen_gen_wasmtime_py::Opts::default()
.build()
.generate(&iface, Direction::Export, &mut files);
for (file, contents) in files.iter() {
fs::write(dir.join("exports").join(file), contents).unwrap();
}
fs::write(dir.join("exports").join("__init__.py"), "").unwrap();
println!("Running mypy...");
exec(
Command::new("mypy")
.env("MYPYPATH", &dir)
.arg("tests/run.py"),
);
for (_name, wasm) in build_test_wasm::WASMS {
println!("Running {}...", wasm);
exec(
Command::new("python3")
.env("PYTHONPATH", &dir)
.arg("tests/run.py")
.arg(wasm),
);
}
}
fn exec(cmd: &mut Command) {
println!("{:?}", cmd);
let output = cmd.output().unwrap();
if output.status.success() {
return;
}
println!("status: {}", output.status);
println!(
"stdout ---\n {}",
String::from_utf8_lossy(&output.stdout).replace("\n", "\n ")
);
println!(
"stderr ---\n {}",
String::from_utf8_lossy(&output.stderr).replace("\n", "\n ")
);
    panic!("command did not exit successfully");
}

// File: 6ab76937b582e6118846f61ce3041c87c24edaa9
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
impl super::INTTCSTAT {
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
}
#[doc = r" Value of the field"]
pub struct INTTCSTAT0R {
bits: bool,
}
impl INTTCSTAT0R {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct INTTCSTAT1R {
bits: bool,
}
impl INTTCSTAT1R {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct INTTCSTAT2R {
bits: bool,
}
impl INTTCSTAT2R {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct INTTCSTAT3R {
bits: bool,
}
impl INTTCSTAT3R {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct INTTCSTAT4R {
bits: bool,
}
impl INTTCSTAT4R {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct INTTCSTAT5R {
bits: bool,
}
impl INTTCSTAT5R {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct INTTCSTAT6R {
bits: bool,
}
impl INTTCSTAT6R {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct INTTCSTAT7R {
bits: bool,
}
impl INTTCSTAT7R {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bit 0 - Terminal count interrupt request status for DMA channels. Each bit represents one channel: 0 - the corresponding channel has no active terminal count interrupt request. 1 - the corresponding channel does have an active terminal count interrupt request."]
#[inline]
pub fn inttcstat0(&self) -> INTTCSTAT0R {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) != 0
};
INTTCSTAT0R { bits }
}
#[doc = "Bit 1 - Terminal count interrupt request status for DMA channels. Each bit represents one channel: 0 - the corresponding channel has no active terminal count interrupt request. 1 - the corresponding channel does have an active terminal count interrupt request."]
#[inline]
pub fn inttcstat1(&self) -> INTTCSTAT1R {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 1;
((self.bits >> OFFSET) & MASK as u32) != 0
};
INTTCSTAT1R { bits }
}
#[doc = "Bit 2 - Terminal count interrupt request status for DMA channels. Each bit represents one channel: 0 - the corresponding channel has no active terminal count interrupt request. 1 - the corresponding channel does have an active terminal count interrupt request."]
#[inline]
pub fn inttcstat2(&self) -> INTTCSTAT2R {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 2;
((self.bits >> OFFSET) & MASK as u32) != 0
};
INTTCSTAT2R { bits }
}
#[doc = "Bit 3 - Terminal count interrupt request status for DMA channels. Each bit represents one channel: 0 - the corresponding channel has no active terminal count interrupt request. 1 - the corresponding channel does have an active terminal count interrupt request."]
#[inline]
pub fn inttcstat3(&self) -> INTTCSTAT3R {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 3;
((self.bits >> OFFSET) & MASK as u32) != 0
};
INTTCSTAT3R { bits }
}
#[doc = "Bit 4 - Terminal count interrupt request status for DMA channels. Each bit represents one channel: 0 - the corresponding channel has no active terminal count interrupt request. 1 - the corresponding channel does have an active terminal count interrupt request."]
#[inline]
pub fn inttcstat4(&self) -> INTTCSTAT4R {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 4;
((self.bits >> OFFSET) & MASK as u32) != 0
};
INTTCSTAT4R { bits }
}
#[doc = "Bit 5 - Terminal count interrupt request status for DMA channels. Each bit represents one channel: 0 - the corresponding channel has no active terminal count interrupt request. 1 - the corresponding channel does have an active terminal count interrupt request."]
#[inline]
pub fn inttcstat5(&self) -> INTTCSTAT5R {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 5;
((self.bits >> OFFSET) & MASK as u32) != 0
};
INTTCSTAT5R { bits }
}
#[doc = "Bit 6 - Terminal count interrupt request status for DMA channels. Each bit represents one channel: 0 - the corresponding channel has no active terminal count interrupt request. 1 - the corresponding channel does have an active terminal count interrupt request."]
#[inline]
pub fn inttcstat6(&self) -> INTTCSTAT6R {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 6;
((self.bits >> OFFSET) & MASK as u32) != 0
};
INTTCSTAT6R { bits }
}
#[doc = "Bit 7 - Terminal count interrupt request status for DMA channels. Each bit represents one channel: 0 - the corresponding channel has no active terminal count interrupt request. 1 - the corresponding channel does have an active terminal count interrupt request."]
#[inline]
pub fn inttcstat7(&self) -> INTTCSTAT7R {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 7;
((self.bits >> OFFSET) & MASK as u32) != 0
};
INTTCSTAT7R { bits }
}
}
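// A minimal sketch of consuming this reader (the `dma` peripheral handle is
// assumed, not defined in this file):
//
// let status = dma.inttcstat.read();
// if status.inttcstat0().bit_is_set() {
//     // DMA channel 0 raised a terminal count interrupt.
// }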

// File: dbb6027eadf991b57c81f23ba1ff0257ad5ec6c9
pub type Point = nalgebra::Point2<f32>;

// File: 722741c9543b8db5f6113eb52863f9fb64380827
/*
* WarframeStat.us API
*
* Simple API for data from the game Warframe. [Parser Docs](https://wfcd.github.io/warframe-worldstate-parser/) [Items Types](https://github.com/WFCD/warframe-items/blob/master/index.d.ts)
*
* The version of the OpenAPI document: living
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
pub struct VoidTraderAllOf {
#[serde(rename = "character")]
pub character: String,
#[serde(rename = "location")]
pub location: String,
#[serde(rename = "inventory")]
pub inventory: Vec<crate::models::VoidTraderAllOfInventory>,
#[serde(rename = "psId")]
pub ps_id: String,
#[serde(rename = "active")]
pub active: bool,
#[serde(rename = "startString")]
pub start_string: String,
#[serde(rename = "endString")]
pub end_string: String,
}
impl VoidTraderAllOf {
pub fn new(character: String, location: String, inventory: Vec<crate::models::VoidTraderAllOfInventory>, ps_id: String, active: bool, start_string: String, end_string: String) -> VoidTraderAllOf {
VoidTraderAllOf {
character,
location,
inventory,
ps_id,
active,
start_string,
end_string,
}
}
}
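// A minimal sketch of constructing the model directly (all field values are
// hypothetical; in practice this struct is deserialized from the API):
//
// let trader = VoidTraderAllOf::new(
//     "character".into(), "location".into(), vec![], "ps_id".into(),
//     true, "start".into(), "end".into(),
// );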

// File: 4bef7b81efafd270bfb5db7f42659819fd654ddb
//! Facilities for working with `v8::HandleScope`s and `v8::EscapableHandleScope`s.
use crate::raw::{EscapableHandleScope, HandleScope, InheritedHandleScope, Isolate};
pub trait Root {
/// # Safety
/// Allocates an uninitialized scope. See `enter` and `exit`.
unsafe fn allocate() -> Self;
/// # Safety
/// Must be called exactly once after creating a `Root` and before usage
unsafe fn enter(&mut self, _: Isolate);
/// # Safety
/// Must be called exactly once, if and only if `enter` succeeds
unsafe fn exit(&mut self, _: Isolate);
}
impl Root for HandleScope {
unsafe fn allocate() -> Self {
HandleScope::new()
}
unsafe fn enter(&mut self, isolate: Isolate) {
enter(self, isolate)
}
unsafe fn exit(&mut self, _: Isolate) {
exit(self)
}
}
impl Root for EscapableHandleScope {
unsafe fn allocate() -> Self {
EscapableHandleScope::new()
}
unsafe fn enter(&mut self, isolate: Isolate) {
enter_escapable(self, isolate)
}
unsafe fn exit(&mut self, _: Isolate) {
exit_escapable(self)
}
}
impl Root for InheritedHandleScope {
unsafe fn allocate() -> Self {
InheritedHandleScope
}
unsafe fn enter(&mut self, _: Isolate) {}
unsafe fn exit(&mut self, _: Isolate) {}
}
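// A minimal sketch of the allocate/enter/exit contract defined above; the
// `isolate` value is hypothetical, and real callers go through Neon's safe
// wrappers rather than driving this directly.
//
// unsafe {
//     let mut scope = HandleScope::allocate();
//     scope.enter(isolate);
//     // ... create handles bound to this scope ...
//     scope.exit(isolate);
// }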
/// Mutates the `out` argument provided to refer to the newly escaped `v8::Local` value.
pub use neon_sys::Neon_Scope_Escape as escape;
/// Creates a `v8::EscapableHandleScope` and calls the `callback` provided with the argument
/// signature `(out, parent_scope, &v8_scope, closure)`.
pub use neon_sys::Neon_Scope_Chained as chained;
/// Creates a `v8::HandleScope` and calls the `callback` provided with the argument signature
/// `(out, realm, closure)`.
pub use neon_sys::Neon_Scope_Nested as nested;
/// Instantiates a new `v8::HandleScope`.
pub use neon_sys::Neon_Scope_Enter as enter;
/// Destructs a `v8::HandleScope`.
pub use neon_sys::Neon_Scope_Exit as exit;
/// Instantiates a new `v8::EscapableHandleScope`.
pub use neon_sys::Neon_Scope_Enter_Escapable as enter_escapable;
/// Destructs a `v8::EscapableHandleScope`.
pub use neon_sys::Neon_Scope_Exit_Escapable as exit_escapable;
/// Gets the size of a `v8::HandleScope`.
pub use neon_sys::Neon_Scope_Sizeof as size;
/// Gets the alignment requirement of a `v8::HandleScope`.
pub use neon_sys::Neon_Scope_Alignof as alignment;
/// Gets the size of a `v8::EscapableHandleScope`.
pub use neon_sys::Neon_Scope_SizeofEscapable as escapable_size;
/// Gets the alignment requirement of a `v8::EscapableHandleScope`.
pub use neon_sys::Neon_Scope_AlignofEscapable as escapable_alignment;
/// Mutates the `out` argument provided to refer to the `v8::Local` value of the `global`
/// object.
pub use neon_sys::Neon_Scope_GetGlobal as get_global;

// File: f8ce48ac1f4a1b71471b4f4df8a2f29fff1e50e2
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::MODE {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = "Possible values of the field `OPERATION`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum OPERATIONR {
#[doc = "Sample and store one pair (Left + Right) of 16bit samples per RAM word R=\\[31:16\\]; L=\\[15:0\\]"]
STEREO,
#[doc = "Sample and store two successive Left samples (16 bit each) per RAM word L1=\\[31:16\\]; L0=\\[15:0\\]"]
MONO,
}
impl OPERATIONR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
OPERATIONR::STEREO => false,
OPERATIONR::MONO => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> OPERATIONR {
match value {
false => OPERATIONR::STEREO,
true => OPERATIONR::MONO,
}
}
#[doc = "Checks if the value of the field is `STEREO`"]
#[inline]
pub fn is_stereo(&self) -> bool {
*self == OPERATIONR::STEREO
}
#[doc = "Checks if the value of the field is `MONO`"]
#[inline]
pub fn is_mono(&self) -> bool {
*self == OPERATIONR::MONO
}
}
#[doc = "Possible values of the field `EDGE`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EDGER {
#[doc = "Left (or mono) is sampled on falling edge of PDM_CLK"]
LEFTFALLING,
#[doc = "Left (or mono) is sampled on rising edge of PDM_CLK"]
LEFTRISING,
}
impl EDGER {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
EDGER::LEFTFALLING => false,
EDGER::LEFTRISING => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> EDGER {
match value {
false => EDGER::LEFTFALLING,
true => EDGER::LEFTRISING,
}
}
#[doc = "Checks if the value of the field is `LEFTFALLING`"]
#[inline]
pub fn is_left_falling(&self) -> bool {
*self == EDGER::LEFTFALLING
}
#[doc = "Checks if the value of the field is `LEFTRISING`"]
#[inline]
pub fn is_left_rising(&self) -> bool {
*self == EDGER::LEFTRISING
}
}
#[doc = "Values that can be written to the field `OPERATION`"]
pub enum OPERATIONW {
#[doc = "Sample and store one pair (Left + Right) of 16bit samples per RAM word R=\\[31:16\\]; L=\\[15:0\\]"]
STEREO,
#[doc = "Sample and store two successive Left samples (16 bit each) per RAM word L1=\\[31:16\\]; L0=\\[15:0\\]"]
MONO,
}
impl OPERATIONW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
OPERATIONW::STEREO => false,
OPERATIONW::MONO => true,
}
}
}
#[doc = r" Proxy"]
pub struct _OPERATIONW<'a> {
w: &'a mut W,
}
impl<'a> _OPERATIONW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: OPERATIONW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Sample and store one pair (Left + Right) of 16bit samples per RAM word R=\\[31:16\\]; L=\\[15:0\\]"]
#[inline]
pub fn stereo(self) -> &'a mut W {
self.variant(OPERATIONW::STEREO)
}
#[doc = "Sample and store two successive Left samples (16 bit each) per RAM word L1=\\[31:16\\]; L0=\\[15:0\\]"]
#[inline]
pub fn mono(self) -> &'a mut W {
self.variant(OPERATIONW::MONO)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `EDGE`"]
pub enum EDGEW {
#[doc = "Left (or mono) is sampled on falling edge of PDM_CLK"]
LEFTFALLING,
#[doc = "Left (or mono) is sampled on rising edge of PDM_CLK"]
LEFTRISING,
}
impl EDGEW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
EDGEW::LEFTFALLING => false,
EDGEW::LEFTRISING => true,
}
}
}
#[doc = r" Proxy"]
pub struct _EDGEW<'a> {
w: &'a mut W,
}
impl<'a> _EDGEW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: EDGEW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Left (or mono) is sampled on falling edge of PDM_CLK"]
#[inline]
pub fn left_falling(self) -> &'a mut W {
self.variant(EDGEW::LEFTFALLING)
}
#[doc = "Left (or mono) is sampled on rising edge of PDM_CLK"]
#[inline]
pub fn left_rising(self) -> &'a mut W {
self.variant(EDGEW::LEFTRISING)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 1;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bit 0 - Mono or stereo operation"]
#[inline]
pub fn operation(&self) -> OPERATIONR {
OPERATIONR::_from({
const MASK: bool = true;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 1 - Defines on which PDM_CLK edge Left (or mono) is sampled"]
#[inline]
pub fn edge(&self) -> EDGER {
EDGER::_from({
const MASK: bool = true;
const OFFSET: u8 = 1;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bit 0 - Mono or stereo operation"]
#[inline]
pub fn operation(&mut self) -> _OPERATIONW {
_OPERATIONW { w: self }
}
#[doc = "Bit 1 - Defines on which PDM_CLK edge Left (or mono) is sampled"]
#[inline]
pub fn edge(&mut self) -> _EDGEW {
_EDGEW { w: self }
}
}
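// A minimal sketch of the generated read-modify-write API (the `pdm`
// peripheral handle is assumed, not defined in this file):
//
// pdm.mode.modify(|_, w| w.operation().mono().edge().left_rising());
// assert!(pdm.mode.read().edge().is_left_rising());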

// File: 9151b819bf76c8ea92b3e14a7b3231186b60f71a
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub fn parse_http_generic_error(
response: &http::Response<bytes::Bytes>,
) -> Result<smithy_types::Error, smithy_json::deserialize::Error> {
crate::json_errors::parse_generic_error(response.body(), response.headers())
}
pub fn deser_structure_crate_error_internal_failure_exceptionjson_err(
input: &[u8],
mut builder: crate::error::internal_failure_exception::Builder,
) -> Result<crate::error::internal_failure_exception::Builder, smithy_json::deserialize::Error> {
let mut tokens_owned =
smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
.peekable();
let tokens = &mut tokens_owned;
smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"code" => {
builder = builder.set_code(
smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"message" => {
builder = builder.set_message(
smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
_ => smithy_json::deserialize::token::skip_value(tokens)?,
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
if tokens.next().is_some() {
return Err(smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn deser_structure_crate_error_invalid_request_exceptionjson_err(
input: &[u8],
mut builder: crate::error::invalid_request_exception::Builder,
) -> Result<crate::error::invalid_request_exception::Builder, smithy_json::deserialize::Error> {
let mut tokens_owned =
smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
.peekable();
let tokens = &mut tokens_owned;
smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"code" => {
builder = builder.set_code(
smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"message" => {
builder = builder.set_message(
smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
_ => smithy_json::deserialize::token::skip_value(tokens)?,
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
if tokens.next().is_some() {
return Err(smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn deser_structure_crate_error_resource_conflict_exceptionjson_err(
input: &[u8],
mut builder: crate::error::resource_conflict_exception::Builder,
) -> Result<crate::error::resource_conflict_exception::Builder, smithy_json::deserialize::Error> {
let mut tokens_owned =
smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
.peekable();
let tokens = &mut tokens_owned;
smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"code" => {
builder = builder.set_code(
smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"message" => {
builder = builder.set_message(
smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
_ => smithy_json::deserialize::token::skip_value(tokens)?,
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
if tokens.next().is_some() {
return Err(smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn deser_structure_crate_error_resource_not_found_exceptionjson_err(
input: &[u8],
mut builder: crate::error::resource_not_found_exception::Builder,
) -> Result<crate::error::resource_not_found_exception::Builder, smithy_json::deserialize::Error> {
let mut tokens_owned =
smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
.peekable();
let tokens = &mut tokens_owned;
smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"code" => {
builder = builder.set_code(
smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"message" => {
builder = builder.set_message(
smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
_ => smithy_json::deserialize::token::skip_value(tokens)?,
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
if tokens.next().is_some() {
return Err(smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn deser_structure_crate_error_too_many_requests_exceptionjson_err(
input: &[u8],
mut builder: crate::error::too_many_requests_exception::Builder,
) -> Result<crate::error::too_many_requests_exception::Builder, smithy_json::deserialize::Error> {
let mut tokens_owned =
smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
.peekable();
let tokens = &mut tokens_owned;
smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"code" => {
builder = builder.set_code(
smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"message" => {
builder = builder.set_message(
smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
_ => smithy_json::deserialize::token::skip_value(tokens)?,
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
if tokens.next().is_some() {
return Err(smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn deser_operation_crate_operation_describe_placement(
input: &[u8],
mut builder: crate::output::describe_placement_output::Builder,
) -> Result<crate::output::describe_placement_output::Builder, smithy_json::deserialize::Error> {
let mut tokens_owned =
smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
.peekable();
let tokens = &mut tokens_owned;
smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"placement" => {
builder = builder.set_placement(
crate::json_deser::deser_structure_crate_model_placement_description(
tokens,
)?,
);
}
_ => smithy_json::deserialize::token::skip_value(tokens)?,
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
if tokens.next().is_some() {
return Err(smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn deser_operation_crate_operation_describe_project(
input: &[u8],
mut builder: crate::output::describe_project_output::Builder,
) -> Result<crate::output::describe_project_output::Builder, smithy_json::deserialize::Error> {
let mut tokens_owned =
smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
.peekable();
let tokens = &mut tokens_owned;
smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"project" => {
builder = builder.set_project(
crate::json_deser::deser_structure_crate_model_project_description(
tokens,
)?,
);
}
_ => smithy_json::deserialize::token::skip_value(tokens)?,
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
if tokens.next().is_some() {
return Err(smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn deser_operation_crate_operation_get_devices_in_placement(
input: &[u8],
mut builder: crate::output::get_devices_in_placement_output::Builder,
) -> Result<crate::output::get_devices_in_placement_output::Builder, smithy_json::deserialize::Error>
{
let mut tokens_owned =
smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
.peekable();
let tokens = &mut tokens_owned;
smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"devices" => {
builder = builder.set_devices(
crate::json_deser::deser_map_com_amazonaws_iot1clickprojects_device_map(tokens)?
);
}
_ => smithy_json::deserialize::token::skip_value(tokens)?,
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
if tokens.next().is_some() {
return Err(smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn deser_operation_crate_operation_list_placements(
input: &[u8],
mut builder: crate::output::list_placements_output::Builder,
) -> Result<crate::output::list_placements_output::Builder, smithy_json::deserialize::Error> {
let mut tokens_owned =
smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
.peekable();
let tokens = &mut tokens_owned;
smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"nextToken" => {
builder = builder.set_next_token(
smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"placements" => {
builder = builder.set_placements(
crate::json_deser::deser_list_com_amazonaws_iot1clickprojects_placement_summary_list(tokens)?
);
}
_ => smithy_json::deserialize::token::skip_value(tokens)?,
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
if tokens.next().is_some() {
return Err(smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn deser_operation_crate_operation_list_projects(
input: &[u8],
mut builder: crate::output::list_projects_output::Builder,
) -> Result<crate::output::list_projects_output::Builder, smithy_json::deserialize::Error> {
let mut tokens_owned =
smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
.peekable();
let tokens = &mut tokens_owned;
smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"nextToken" => {
builder = builder.set_next_token(
smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"projects" => {
builder = builder.set_projects(
crate::json_deser::deser_list_com_amazonaws_iot1clickprojects_project_summary_list(tokens)?
);
}
_ => smithy_json::deserialize::token::skip_value(tokens)?,
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
if tokens.next().is_some() {
return Err(smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn deser_operation_crate_operation_list_tags_for_resource(
input: &[u8],
mut builder: crate::output::list_tags_for_resource_output::Builder,
) -> Result<crate::output::list_tags_for_resource_output::Builder, smithy_json::deserialize::Error>
{
let mut tokens_owned =
smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
.peekable();
let tokens = &mut tokens_owned;
smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"tags" => {
builder = builder.set_tags(
crate::json_deser::deser_map_com_amazonaws_iot1clickprojects_tag_map(
tokens,
)?,
);
}
_ => smithy_json::deserialize::token::skip_value(tokens)?,
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
if tokens.next().is_some() {
return Err(smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn or_empty_doc(data: &[u8]) -> &[u8] {
if data.is_empty() {
b"{}"
} else {
data
}
}
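// Illustrative behavior: an empty response body is treated as an empty JSON
// document, so the deserializers above can run unchanged on empty payloads,
// e.g.
//   or_empty_doc(b"") == b"{}"
//   or_empty_doc(b"{\"tags\":{}}") == b"{\"tags\":{}}"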
pub fn deser_structure_crate_model_placement_description<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::PlacementDescription>, smithy_json::deserialize::Error>
where
I: Iterator<
Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
>,
{
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(smithy_json::deserialize::Token::StartObject { .. }) => {
#[allow(unused_mut)]
let mut builder = crate::model::PlacementDescription::builder();
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"projectName" => {
builder = builder.set_project_name(
smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"placementName" => {
builder = builder.set_placement_name(
smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"attributes" => {
builder = builder.set_attributes(
crate::json_deser::deser_map_com_amazonaws_iot1clickprojects_placement_attribute_map(tokens)?
);
}
"createdDate" => {
builder = builder.set_created_date(
smithy_json::deserialize::token::expect_timestamp_or_null(
tokens.next(),
smithy_types::instant::Format::EpochSeconds,
)?,
);
}
"updatedDate" => {
builder = builder.set_updated_date(
smithy_json::deserialize::token::expect_timestamp_or_null(
tokens.next(),
smithy_types::instant::Format::EpochSeconds,
)?,
);
}
_ => smithy_json::deserialize::token::skip_value(tokens)?,
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
Ok(Some(builder.build()))
}
_ => Err(smithy_json::deserialize::Error::custom(
"expected start object or null",
)),
}
}
pub fn deser_structure_crate_model_project_description<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::ProjectDescription>, smithy_json::deserialize::Error>
where
I: Iterator<
Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
>,
{
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(smithy_json::deserialize::Token::StartObject { .. }) => {
#[allow(unused_mut)]
let mut builder = crate::model::ProjectDescription::builder();
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"arn" => {
builder = builder.set_arn(
smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"projectName" => {
builder = builder.set_project_name(
smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"description" => {
builder = builder.set_description(
smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"createdDate" => {
builder = builder.set_created_date(
smithy_json::deserialize::token::expect_timestamp_or_null(
tokens.next(),
smithy_types::instant::Format::EpochSeconds,
)?,
);
}
"updatedDate" => {
builder = builder.set_updated_date(
smithy_json::deserialize::token::expect_timestamp_or_null(
tokens.next(),
smithy_types::instant::Format::EpochSeconds,
)?,
);
}
"placementTemplate" => {
builder = builder.set_placement_template(
crate::json_deser::deser_structure_crate_model_placement_template(tokens)?
);
}
"tags" => {
builder = builder.set_tags(
crate::json_deser::deser_map_com_amazonaws_iot1clickprojects_tag_map(tokens)?
);
}
_ => smithy_json::deserialize::token::skip_value(tokens)?,
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
Ok(Some(builder.build()))
}
_ => Err(smithy_json::deserialize::Error::custom(
"expected start object or null",
)),
}
}
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_map_com_amazonaws_iot1clickprojects_device_map<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<
Option<std::collections::HashMap<std::string::String, std::string::String>>,
smithy_json::deserialize::Error,
>
where
I: Iterator<
Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
>,
{
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(smithy_json::deserialize::Token::StartObject { .. }) => {
let mut map = std::collections::HashMap::new();
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
let key = key.to_unescaped().map(|u| u.into_owned())?;
let value =
smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?;
if let Some(value) = value {
map.insert(key, value);
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
Ok(Some(map))
}
_ => Err(smithy_json::deserialize::Error::custom(
"expected start object or null",
)),
}
}
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_list_com_amazonaws_iot1clickprojects_placement_summary_list<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<std::vec::Vec<crate::model::PlacementSummary>>, smithy_json::deserialize::Error>
where
I: Iterator<
Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
>,
{
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(smithy_json::deserialize::Token::StartArray { .. }) => {
let mut items = Vec::new();
loop {
match tokens.peek() {
Some(Ok(smithy_json::deserialize::Token::EndArray { .. })) => {
tokens.next().transpose().unwrap();
break;
}
_ => {
let value =
crate::json_deser::deser_structure_crate_model_placement_summary(
tokens,
)?;
if let Some(value) = value {
items.push(value);
}
}
}
}
Ok(Some(items))
}
_ => Err(smithy_json::deserialize::Error::custom(
"expected start array or null",
)),
}
}
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_list_com_amazonaws_iot1clickprojects_project_summary_list<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<std::vec::Vec<crate::model::ProjectSummary>>, smithy_json::deserialize::Error>
where
I: Iterator<
Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
>,
{
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(smithy_json::deserialize::Token::StartArray { .. }) => {
let mut items = Vec::new();
loop {
match tokens.peek() {
Some(Ok(smithy_json::deserialize::Token::EndArray { .. })) => {
tokens.next().transpose().unwrap();
break;
}
_ => {
let value =
crate::json_deser::deser_structure_crate_model_project_summary(tokens)?;
if let Some(value) = value {
items.push(value);
}
}
}
}
Ok(Some(items))
}
_ => Err(smithy_json::deserialize::Error::custom(
"expected start array or null",
)),
}
}
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_map_com_amazonaws_iot1clickprojects_tag_map<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<
Option<std::collections::HashMap<std::string::String, std::string::String>>,
smithy_json::deserialize::Error,
>
where
I: Iterator<
Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
>,
{
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(smithy_json::deserialize::Token::StartObject { .. }) => {
let mut map = std::collections::HashMap::new();
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
let key = key.to_unescaped().map(|u| u.into_owned())?;
let value =
smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?;
if let Some(value) = value {
map.insert(key, value);
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
Ok(Some(map))
}
_ => Err(smithy_json::deserialize::Error::custom(
"expected start object or null",
)),
}
}
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_map_com_amazonaws_iot1clickprojects_placement_attribute_map<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<
Option<std::collections::HashMap<std::string::String, std::string::String>>,
smithy_json::deserialize::Error,
>
where
I: Iterator<
Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
>,
{
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(smithy_json::deserialize::Token::StartObject { .. }) => {
let mut map = std::collections::HashMap::new();
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
let key = key.to_unescaped().map(|u| u.into_owned())?;
let value =
smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?;
if let Some(value) = value {
map.insert(key, value);
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
Ok(Some(map))
}
_ => Err(smithy_json::deserialize::Error::custom(
"expected start object or null",
)),
}
}
pub fn deser_structure_crate_model_placement_template<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::PlacementTemplate>, smithy_json::deserialize::Error>
where
I: Iterator<
Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
>,
{
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(smithy_json::deserialize::Token::StartObject { .. }) => {
#[allow(unused_mut)]
let mut builder = crate::model::PlacementTemplate::builder();
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"defaultAttributes" => {
builder = builder.set_default_attributes(
crate::json_deser::deser_map_com_amazonaws_iot1clickprojects_default_placement_attribute_map(tokens)?
);
}
"deviceTemplates" => {
builder = builder.set_device_templates(
crate::json_deser::deser_map_com_amazonaws_iot1clickprojects_device_template_map(tokens)?
);
}
_ => smithy_json::deserialize::token::skip_value(tokens)?,
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
Ok(Some(builder.build()))
}
_ => Err(smithy_json::deserialize::Error::custom(
"expected start object or null",
)),
}
}
pub fn deser_structure_crate_model_placement_summary<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::PlacementSummary>, smithy_json::deserialize::Error>
where
I: Iterator<
Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
>,
{
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(smithy_json::deserialize::Token::StartObject { .. }) => {
#[allow(unused_mut)]
let mut builder = crate::model::PlacementSummary::builder();
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"projectName" => {
builder = builder.set_project_name(
smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"placementName" => {
builder = builder.set_placement_name(
smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"createdDate" => {
builder = builder.set_created_date(
smithy_json::deserialize::token::expect_timestamp_or_null(
tokens.next(),
smithy_types::instant::Format::EpochSeconds,
)?,
);
}
"updatedDate" => {
builder = builder.set_updated_date(
smithy_json::deserialize::token::expect_timestamp_or_null(
tokens.next(),
smithy_types::instant::Format::EpochSeconds,
)?,
);
}
_ => smithy_json::deserialize::token::skip_value(tokens)?,
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
Ok(Some(builder.build()))
}
_ => Err(smithy_json::deserialize::Error::custom(
"expected start object or null",
)),
}
}
pub fn deser_structure_crate_model_project_summary<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::ProjectSummary>, smithy_json::deserialize::Error>
where
I: Iterator<
Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
>,
{
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(smithy_json::deserialize::Token::StartObject { .. }) => {
#[allow(unused_mut)]
let mut builder = crate::model::ProjectSummary::builder();
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"arn" => {
builder = builder.set_arn(
smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"projectName" => {
builder = builder.set_project_name(
smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"createdDate" => {
builder = builder.set_created_date(
smithy_json::deserialize::token::expect_timestamp_or_null(
tokens.next(),
smithy_types::instant::Format::EpochSeconds,
)?,
);
}
"updatedDate" => {
builder = builder.set_updated_date(
smithy_json::deserialize::token::expect_timestamp_or_null(
tokens.next(),
smithy_types::instant::Format::EpochSeconds,
)?,
);
}
"tags" => {
builder = builder.set_tags(
crate::json_deser::deser_map_com_amazonaws_iot1clickprojects_tag_map(tokens)?
);
}
_ => smithy_json::deserialize::token::skip_value(tokens)?,
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
Ok(Some(builder.build()))
}
_ => Err(smithy_json::deserialize::Error::custom(
"expected start object or null",
)),
}
}
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_map_com_amazonaws_iot1clickprojects_default_placement_attribute_map<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<
Option<std::collections::HashMap<std::string::String, std::string::String>>,
smithy_json::deserialize::Error,
>
where
I: Iterator<
Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
>,
{
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(smithy_json::deserialize::Token::StartObject { .. }) => {
let mut map = std::collections::HashMap::new();
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
let key = key.to_unescaped().map(|u| u.into_owned())?;
let value =
smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?;
if let Some(value) = value {
map.insert(key, value);
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
Ok(Some(map))
}
_ => Err(smithy_json::deserialize::Error::custom(
"expected start object or null",
)),
}
}
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_map_com_amazonaws_iot1clickprojects_device_template_map<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<
Option<std::collections::HashMap<std::string::String, crate::model::DeviceTemplate>>,
smithy_json::deserialize::Error,
>
where
I: Iterator<
Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
>,
{
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(smithy_json::deserialize::Token::StartObject { .. }) => {
let mut map = std::collections::HashMap::new();
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
let key = key.to_unescaped().map(|u| u.into_owned())?;
let value =
crate::json_deser::deser_structure_crate_model_device_template(tokens)?;
if let Some(value) = value {
map.insert(key, value);
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
Ok(Some(map))
}
_ => Err(smithy_json::deserialize::Error::custom(
"expected start object or null",
)),
}
}
pub fn deser_structure_crate_model_device_template<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::DeviceTemplate>, smithy_json::deserialize::Error>
where
I: Iterator<
Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
>,
{
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(smithy_json::deserialize::Token::StartObject { .. }) => {
#[allow(unused_mut)]
let mut builder = crate::model::DeviceTemplate::builder();
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"deviceType" => {
builder = builder.set_device_type(
smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"callbackOverrides" => {
builder = builder.set_callback_overrides(
crate::json_deser::deser_map_com_amazonaws_iot1clickprojects_device_callback_override_map(tokens)?
);
}
_ => smithy_json::deserialize::token::skip_value(tokens)?,
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
Ok(Some(builder.build()))
}
_ => Err(smithy_json::deserialize::Error::custom(
"expected start object or null",
)),
}
}
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_map_com_amazonaws_iot1clickprojects_device_callback_override_map<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<
Option<std::collections::HashMap<std::string::String, std::string::String>>,
smithy_json::deserialize::Error,
>
where
I: Iterator<
Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
>,
{
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(smithy_json::deserialize::Token::StartObject { .. }) => {
let mut map = std::collections::HashMap::new();
loop {
match tokens.next().transpose()? {
Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
let key = key.to_unescaped().map(|u| u.into_owned())?;
let value =
smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?;
if let Some(value) = value {
map.insert(key, value);
}
}
_ => {
return Err(smithy_json::deserialize::Error::custom(
"expected object key or end object",
))
}
}
}
Ok(Some(map))
}
_ => Err(smithy_json::deserialize::Error::custom(
"expected start object or null",
)),
}
}
| 43.590244 | 137 | 0.46654 |
e2ae016a7c8be95cfda0fe215c3d744254cd9d7a | 17,646 | use std::{
collections::{HashMap, HashSet},
fs,
mem::size_of,
};
use elfen::{
elf::Elf,
header::{self, Header},
rel::{self, Rela},
section::{self, SectionData, SectionHeader},
segment::{self, ProgramHeader},
strtab::Strtab,
symbol::{self, Symbol},
tse::Tse,
};
pub fn link_to_files(input_files: Vec<String>, output_file: String) -> Result<(), String> {
let input_elfs = input_files
.into_iter()
.map(|path| Elf::read_from_file(&path))
.collect();
let output_elf = link(input_elfs)?;
let elf_bytes = output_elf.to_bytes();
fs::write(output_file, elf_bytes).unwrap();
Ok(())
}
pub fn link(input_elfs: Vec<Elf>) -> Result<Elf, String> {
let linker = Linker::new(input_elfs);
let output_elf = linker.link()?;
Ok(output_elf)
}
struct Linker {
input_elfs: Vec<Elf>,
output_elf: Elf,
global_symbols: HashMap<String, SymbolSignature>,
symbol_map: HashMap<SectionPlace, Vec<String>>,
relas: Vec<RelaSignature>,
rela_map: HashMap<SectionPlace, Vec<usize>>,
tses: Vec<TseSignature>,
symbol_indices: HashMap<String, usize>,
section_offsets: HashMap<usize, u64>,
}
const BASE_ADDRESS: u64 = 0x400000;
const PAGE_SIZE: u64 = 0x1000;
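// 0x400000 is the conventional base load address for x86-64 ELF executables;
// loadable segments are aligned to PAGE_SIZE so the loader can map each one
// with its own protection flags.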
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
struct SectionPlace {
// elf_index = 0 is reserved for output_elf
elf_index: usize,
section_index: usize,
}
#[derive(Debug)]
struct SymbolSignature {
name: String,
symbol: Symbol,
}
#[derive(Debug)]
struct RelaSignature {
symbol_name: String,
rela: Rela,
}
#[derive(Debug)]
struct TseSignature {
symbol_name: String,
tse: Tse,
}
impl Linker {
fn new(input_elfs: Vec<Elf>) -> Self {
Self {
input_elfs,
output_elf: Elf::default(),
global_symbols: HashMap::new(),
symbol_map: HashMap::new(),
relas: Vec::new(),
rela_map: HashMap::new(),
tses: Vec::new(),
symbol_indices: HashMap::new(),
section_offsets: HashMap::new(),
}
}
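    // High-level pipeline: collect TSE entries, relocations and global
    // symbols from the inputs, merge the allocatable sections, lay the file
    // out once so relocations can be resolved against final addresses, then
    // emit `.symtab`/`.strtab`/`.tse_info`/`.shstrtab` and lay out again,
    // since the appended sections shift offsets.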
fn link(mut self) -> Result<Elf, String> {
self.init_elf();
self.load_tses();
self.load_relas();
self.load_symbols();
self.link_sections();
self.layout();
self.resolve_relas();
self.gen_symtab_strtab();
self.gen_tse_info();
self.gen_shstrtab();
self.layout();
self.finalize_elf();
Ok(self.output_elf)
}
fn init_elf(&mut self) {
let header = &mut self.output_elf.header;
header.set_class(header::Class::Class64);
header.set_data(header::Data::Data2LSB);
header.set_osabi(header::OSABI::OSABISysV);
header.set_filetype(header::Type::Exec);
header.set_machine(header::Machine::X86_64);
self.output_elf
.add_section("", SectionHeader::default(), SectionData::None);
}
fn load_tses(&mut self) {
for elf in self.input_elfs.iter_mut() {
let tses = if let Some(section) = elf.get_section_mut(".tse_info") {
std::mem::take(section.data.as_tse_mut().unwrap())
} else {
continue;
};
let symtab_section = elf.get_section(".symtab").unwrap();
let symbols = symtab_section.data.as_symbols().unwrap();
let strtab_section = elf.get_section(".strtab").unwrap();
let strtab = strtab_section.data.as_strtab().unwrap();
for tse in tses {
let symbol = symbols.get(tse.symbol_index as usize).unwrap();
let symbol_name = strtab.get(symbol.name as usize);
self.tses.push(TseSignature { symbol_name, tse });
}
}
}
fn load_relas(&mut self) {
for (elf_index, elf) in self.input_elfs.iter_mut().enumerate() {
let rela_sections: Vec<(usize, Vec<Rela>)> = elf
.sections
.iter_mut()
.filter(|section| section.header.get_type() == section::Type::Rela)
.map(|section| {
let section_index = section.header.info as usize;
                    let relas = std::mem::take(section.data.as_rela_mut().unwrap());
(section_index, relas)
})
.collect();
let symtab_section = elf.get_section(".symtab").unwrap();
let symbols = symtab_section.data.as_symbols().unwrap();
let strtab_section = elf.get_section(".strtab").unwrap();
let strtab = strtab_section.data.as_strtab().unwrap();
for (section_index, relas) in rela_sections {
for rela in relas {
let symbol = symbols.get(rela.get_symbol() as usize).unwrap();
let symbol_name = strtab.get(symbol.name as usize);
self.relas.push(RelaSignature { symbol_name, rela });
let place = SectionPlace {
elf_index: elf_index + 1,
section_index,
};
self.rela_map
.entry(place)
                        .or_insert_with(Vec::new)
.push(self.relas.len() - 1);
}
}
}
}
fn load_symbols(&mut self) {
for (elf_index, elf) in self.input_elfs.iter_mut().enumerate() {
let symtab_section = elf.get_section_mut(".symtab").unwrap();
let symtab_data = symtab_section.data.as_symbols_mut().unwrap();
            let symbols = std::mem::take(symtab_data);
let strtab_section = elf.get_section_mut(".strtab").unwrap();
let strtab_data = strtab_section.data.as_strtab_mut().unwrap();
            let strtab = std::mem::take(strtab_data);
for symbol in symbols {
if symbol.get_binding() != symbol::Binding::Global {
continue;
}
let symbol_name = strtab.get(symbol.name as usize);
if let Some(symbol_sig) = self.global_symbols.get(&symbol_name) {
if symbol.get_index_type() == symbol::IndexType::Undef {
continue;
}
if symbol_sig.symbol.get_index_type() != symbol::IndexType::Undef {
panic!("duplicate symbol: {}", symbol_name);
}
}
self.global_symbols.insert(
symbol_name.clone(),
SymbolSignature {
name: symbol_name.clone(),
symbol,
},
);
let place = SectionPlace {
elf_index: elf_index + 1,
section_index: symbol.section_index as usize,
};
self.symbol_map
.entry(place)
                    .or_insert_with(Vec::new)
.push(symbol_name);
}
}
}
fn link_sections(&mut self) {
for section_name in self.list_sections_to_alloc() {
let new_section_index = self.output_elf.sections.len();
let mut section_header = None;
let mut linked_data: Vec<u8> = Vec::new();
for (elf_index, elf) in self.input_elfs.iter_mut().enumerate() {
let section_index = if let Some(index) = elf.find_section(§ion_name) {
index
} else {
continue;
};
let section = elf.sections.get(section_index).unwrap();
section_header = Some(section.header);
let offset = linked_data.len() as u64;
let section_data = section.data.as_raw().unwrap();
linked_data.extend(section_data);
let place = SectionPlace {
elf_index: elf_index + 1,
section_index,
};
let new_place = SectionPlace {
elf_index: 0,
section_index: new_section_index,
};
// offset symbols
if let Some(symbol_names) = self.symbol_map.remove(&place) {
for symbol_name in &symbol_names {
let symbol_sig = self.global_symbols.get_mut(symbol_name).unwrap();
symbol_sig.symbol.section_index = new_section_index as u16;
symbol_sig.symbol.value += offset;
}
self.symbol_map
.entry(new_place.clone())
                    .or_insert_with(Vec::new)
.extend(symbol_names);
}
// offset relas
if let Some(rela_indices) = self.rela_map.remove(&place) {
for rela_index in &rela_indices {
let rela_sig = self.relas.get_mut(*rela_index).unwrap();
rela_sig.rela.offset += offset;
}
self.rela_map
.entry(new_place)
                    .or_insert_with(Vec::new)
.extend(rela_indices);
}
}
            if linked_data.is_empty() {
continue;
}
self.output_elf.add_section(
§ion_name,
section_header.unwrap(),
SectionData::Raw(linked_data),
);
}
}
fn list_sections_to_alloc(&self) -> Vec<String> {
let mut section_names = HashSet::new();
for elf in &self.input_elfs {
for section in &elf.sections {
if section::Flags::Alloc.contained_in(section.header.flags) {
section_names.insert(section.name.clone());
}
}
}
        let mut section_names: Vec<String> = section_names.into_iter().collect();
        section_names.sort();
        section_names
}
fn layout(&mut self) {
self.output_elf.segments.clear();
let mut cur_offset = size_of::<Header>() as u64;
for (section_index, section) in self.output_elf.sections.iter_mut().enumerate() {
// skip null section
if section_index == 0 {
continue;
}
let shdr = &mut section.header;
shdr.size = section.data.len() as u64;
if section::Flags::Alloc.contained_in(shdr.flags) {
let mut phdr = Self::gen_segment(&shdr);
shdr.offset = Self::align(cur_offset, phdr.alignment);
phdr.offset = shdr.offset;
shdr.addr = BASE_ADDRESS + shdr.offset;
phdr.virt_addr = shdr.addr;
phdr.phys_addr = shdr.addr;
self.output_elf.segments.push(phdr);
} else {
shdr.offset = Self::align(cur_offset, shdr.alignment);
}
cur_offset = shdr.offset + shdr.size;
let offset = if shdr.addr != 0 {
shdr.addr
} else {
shdr.offset
};
self.section_offsets.insert(section_index, offset);
}
}
fn resolve_relas(&mut self) {
for (section_index, section) in self.output_elf.sections.iter_mut().enumerate() {
let place = SectionPlace {
elf_index: 0,
section_index,
};
let rela_indices = if let Some(indices) = self.rela_map.get(&place) {
indices
} else {
continue;
};
for rela_index in rela_indices {
let rela_sig = self.relas.get_mut(*rela_index).unwrap();
let target_symbol = self
.global_symbols
.get(&rela_sig.symbol_name)
.unwrap()
.symbol;
let addr_from = rela_sig.rela.offset as i32;
let addr_to = target_symbol.value as i32;
let mut diff = match rela_sig.rela.get_type() {
rel::Type::Pc32 => {
let offset_from = *self.section_offsets.get(§ion_index).unwrap() as i32;
let sym_idx: u16 = target_symbol.get_index_type().into();
let offset_to = *self.section_offsets.get(&sym_idx.into()).unwrap() as i32;
(addr_to + offset_to) - (addr_from + offset_from)
}
rel::Type::Plt32 => addr_to - addr_from,
                    _ => panic!("unsupported relocation type"),
};
diff += rela_sig.rela.addend as i32;
let code_index = addr_from as usize;
let section_data = section.data.as_raw_mut().unwrap();
for (i, value) in diff.to_le_bytes().iter().enumerate() {
                    section_data[code_index + i] = *value;
}
}
}
}
fn gen_segment(shdr: &SectionHeader) -> ProgramHeader {
let mut phdr = ProgramHeader::default();
phdr.set_type(segment::Type::Load);
phdr.set_flags(segment::Flags::R);
phdr.alignment = PAGE_SIZE;
phdr.file_size = shdr.size;
phdr.memory_size = shdr.size;
if section::Flags::Execinstr.contained_in(shdr.flags) {
phdr.set_flags(segment::Flags::X);
}
if section::Flags::Write.contained_in(shdr.flags) {
phdr.set_flags(segment::Flags::W);
}
phdr
}
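    // Rounds `x` up to the next multiple of `align`; assumes `align` is a
    // power of two (which holds for PAGE_SIZE and the section alignments
    // used here), e.g. align(0x1234, 0x1000) == 0x2000.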
fn align(x: u64, align: u64) -> u64 {
(x + align - 1) & !(align - 1)
}
fn gen_symtab_strtab(&mut self) {
let mut symbols: Vec<Symbol> = Vec::new();
let mut strtab = Strtab::default();
symbols.push(Symbol::default());
strtab.insert("".into());
let mut symbol_sigs: Vec<&SymbolSignature> = self.global_symbols.values().collect();
symbol_sigs.sort_by_key(|sig| sig.symbol.value);
for symbol_sig in symbol_sigs {
let mut symbol = symbol_sig.symbol.clone();
let symbol_name = symbol_sig.name.clone();
let symbol_section_index = symbol.section_index as usize;
symbol.value += self.section_offsets.get(&symbol_section_index).unwrap();
symbol.name = strtab.insert(symbol_name.clone()) as u32;
symbols.push(symbol);
self.symbol_indices.insert(symbol_name, symbols.len() - 1);
}
// generate symtab
{
let mut header = SectionHeader::default();
header.set_type(section::Type::Symtab);
header.entry_size = size_of::<Symbol>() as u64;
header.link = self.output_elf.sections.len() as u32 + 1;
header.alignment = 8;
let num_local_symbols = symbols
.iter()
.filter(|symbol| symbol.get_binding() == symbol::Binding::Local)
.count();
header.info = num_local_symbols as u32;
let data = SectionData::Symbols(symbols);
self.output_elf.add_section(".symtab", header, data);
}
// generate strtab
{
let mut header = SectionHeader::default();
header.set_type(section::Type::Strtab);
header.alignment = 1;
let data = SectionData::Strtab(strtab);
self.output_elf.add_section(".strtab", header, data);
}
}
fn gen_tse_info(&mut self) {
if self.tses.len() == 0 {
return;
}
let mut header = SectionHeader::default();
header.set_type(section::Type::Progbits);
header.entry_size = size_of::<Tse>() as u64;
header.alignment = 8;
let mut tses = Vec::new();
for tse_sig in std::mem::take(&mut self.tses) {
let mut tse = tse_sig.tse;
tse.symbol_index = *self.symbol_indices.get(&tse_sig.symbol_name).unwrap() as u64;
tses.push(tse);
}
let data = SectionData::Tse(tses);
self.output_elf.add_section(".tse_info", header, data);
}
fn gen_shstrtab(&mut self) {
let mut header = SectionHeader::default();
header.set_type(section::Type::Strtab);
header.alignment = 1;
let mut strtab = Strtab::default();
strtab.insert("".into());
for section in self.output_elf.sections.as_mut_slice() {
section.header.name = strtab.insert(section.name.clone()) as u32;
}
header.name = strtab.insert(".shstrtab".into()) as u32;
let data = SectionData::Strtab(strtab);
self.output_elf.add_section(".shstrtab", header, data);
}
fn finalize_elf(&mut self) {
self.output_elf.update_header();
let addr_of_text = self.output_elf.get_section(".text").unwrap().header.addr;
let entrypoint = self.find_symbol("_start").unwrap_or(addr_of_text);
self.output_elf.header.entrypoint = entrypoint;
}
fn find_symbol(&self, name: &str) -> Option<u64> {
let symbol_sig = self.global_symbols.get(name)?;
let symbol = symbol_sig.symbol;
let symbol_section_index = symbol.section_index as usize;
let symbol_offset = self.section_offsets.get(&symbol_section_index).unwrap();
Some(symbol.value + symbol_offset)
}
}
| 33.357278 | 100 | 0.529185 |
79c4afb9768b2926e5ac8d92418ab5b971c08bd1 | 243 | use crate::env::Env;
use crate::ffi::*;
// use deno_core::v8;
// TODO: properly implement ref counting stuff
#[napi_sym]
fn napi_delete_reference(env: napi_env, nref: napi_ref) -> Result {
let mut _env = &mut *(env as *mut Env);
Ok(())
}
| 22.090909 | 67 | 0.666667 |
bbfd01384b23b97cc54e34989641c0ab45d03978 | 1,928 | // Copyright (c) 2018-2020 MobileCoin Inc.
//! Platform Info Blob wrapper
/// The size of a [PlatformInfo]'s x64 representation, in bytes.
pub use mc_sgx_epid_types_sys::SGX_PLATFORM_INFO_SIZE as PLATFORM_INFO_SIZE;
use mc_sgx_core_types::impl_ffi_wrapper;
use mc_sgx_epid_types_sys::sgx_platform_info_t;
#[cfg(feature = "use_prost")]
use mc_util_repr_bytes::derive_prost_message_from_repr_bytes;
#[cfg(feature = "use_serde")]
use mc_util_repr_bytes::derive_serde_from_repr_bytes;
use mc_util_repr_bytes::typenum::U101;
/// A structure containing a "platform info blob", used by IAS
#[derive(Default)]
#[repr(transparent)]
pub struct PlatformInfo(sgx_platform_info_t);
impl_ffi_wrapper! {
PlatformInfo, sgx_platform_info_t, U101, platform_info;
}
#[cfg(feature = "use_prost")]
derive_prost_message_from_repr_bytes!(PlatformInfo);
#[cfg(feature = "use_serde")]
derive_serde_from_repr_bytes!(PlatformInfo);
#[cfg(test)]
mod test {
use super::*;
#[cfg(feature = "use_serde")]
use bincode::{deserialize, serialize};
#[cfg(feature = "use_serde")]
#[test]
fn serde() {
let src = sgx_platform_info_t {
platform_info: [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
],
};
let pib = PlatformInfo::from(src);
let serialized = serialize(&pib).expect("Could not serialize pib");
let pib2 = deserialize::<PlatformInfo>(&serialized).expect("Could not deserialize pib");
assert_eq!(pib, pib2);
}
}
| 34.428571 | 99 | 0.634336 |
0e31cc421e00750a2ceea2129b519a922ef9f508 | 18,814 | #![deny(missing_docs)]
//! This crate provides a [Egui](https://github.com/emilk/egui) integration for the [Bevy](https://github.com/bevyengine/bevy) game engine.
//!
//! **Trying out:**
//!
//! An example WASM project is live at [mvlabat.github.io/bevy_egui_web_showcase](https://mvlabat.github.io/bevy_egui_web_showcase/index.html) [[source](https://github.com/mvlabat/bevy_egui_web_showcase)].
//!
//! **Features:**
//! - Desktop and web ([bevy_webgl2](https://github.com/mrk-its/bevy_webgl2)) platform support
//! - Clipboard (web support is limited to the same window, see [rust-windowing/winit#1829](https://github.com/rust-windowing/winit/issues/1829))
//! - Opening URLs
//! - Multiple windows support (see [./examples/two_windows.rs](./examples/two_windows.rs))
//!
//! `bevy_egui` can be compiled using only `bevy` and `egui` as dependencies: the `manage_clipboard` and `open_url` features,
//! which require additional crates, can be disabled.
//!
//! ## Usage
//!
//! Here's a minimal usage example:
//!
//! ```no_run,rust
//! use bevy::prelude::*;
//! use bevy_egui::{egui, EguiContext, EguiPlugin};
//!
//! fn main() {
//! App::build()
//! .add_plugins(DefaultPlugins)
//! .add_plugin(EguiPlugin)
//! .add_system(ui_example.system())
//! .run();
//! }
//!
//! fn ui_example(egui_context: Res<EguiContext>) {
//! egui::Window::new("Hello").show(egui_context.ctx(), |ui| {
//! ui.label("world");
//! });
//! }
//! ```
//!
//! For a more advanced example, see [examples/ui.rs](examples/ui.rs).
//!
//! ```bash
//! cargo run --example ui
//! ```
//!
//! ## See also
//!
//! - [`bevy-inspector-egui`](https://github.com/jakobhellermann/bevy-inspector-egui)
//! - [`bevy_megaui`](https://github.com/mvlabat/bevy_megaui)
pub use egui;
mod egui_node;
mod systems;
mod transform_node;
use crate::{egui_node::EguiNode, systems::*, transform_node::EguiTransformNode};
use bevy::{
app::{AppBuilder, CoreStage, Plugin},
asset::{Assets, Handle, HandleUntyped},
ecs::{
schedule::{ParallelSystemDescriptorCoercion, StageLabel, SystemLabel, SystemStage},
system::IntoSystem,
},
input::InputSystem,
log,
reflect::TypeUuid,
render::{
pipeline::{
BlendFactor, BlendOperation, BlendState, ColorTargetState, ColorWrite, CompareFunction,
CullMode, DepthBiasState, DepthStencilState, FrontFace, MultisampleState,
PipelineDescriptor, PrimitiveState, StencilFaceState, StencilState,
},
render_graph::{base, base::Msaa, RenderGraph, WindowSwapChainNode, WindowTextureNode},
shader::{Shader, ShaderStage, ShaderStages},
texture::{Texture, TextureFormat},
RenderStage,
},
utils::HashMap,
window::WindowId,
};
#[cfg(all(feature = "manage_clipboard", not(target_arch = "wasm32")))]
use clipboard::{ClipboardContext, ClipboardProvider};
#[cfg(all(feature = "manage_clipboard", not(target_arch = "wasm32")))]
use std::cell::{RefCell, RefMut};
#[cfg(all(feature = "manage_clipboard", not(target_arch = "wasm32")))]
use thread_local::ThreadLocal;
/// A handle pointing to the egui [`PipelineDescriptor`].
pub const EGUI_PIPELINE_HANDLE: HandleUntyped =
HandleUntyped::weak_from_u64(PipelineDescriptor::TYPE_UUID, 9404026720151354217);
/// Name of the transform uniform.
pub const EGUI_TRANSFORM_RESOURCE_BINDING_NAME: &str = "EguiTransform";
/// Name of the texture uniform.
pub const EGUI_TEXTURE_RESOURCE_BINDING_NAME: &str = "EguiTexture_texture";
/// Adds all Egui resources and render graph nodes.
pub struct EguiPlugin;
/// A resource for storing global UI settings.
#[derive(Clone, Debug, PartialEq)]
pub struct EguiSettings {
/// Global scale factor for egui widgets (`1.0` by default).
///
/// This setting can be used to force the UI to render in physical pixels regardless of DPI as follows:
/// ```rust
/// use bevy::prelude::*;
/// use bevy_egui::EguiSettings;
///
/// fn update_ui_scale_factor(mut egui_settings: ResMut<EguiSettings>, windows: Res<Windows>) {
/// if let Some(window) = windows.get_primary() {
/// egui_settings.scale_factor = 1.0 / window.scale_factor();
/// }
/// }
/// ```
pub scale_factor: f64,
}
impl Default for EguiSettings {
fn default() -> Self {
Self { scale_factor: 1.0 }
}
}
/// Is used for storing the input passed to Egui. The actual resource is a [`HashMap<WindowId, EguiInput>`].
///
/// It gets reset during the [`EguiSystem::ProcessInput`] system.
#[derive(Clone, Debug, Default)]
pub struct EguiInput {
/// Egui's raw input.
pub raw_input: egui::RawInput,
}
/// A resource for accessing clipboard.
///
/// The resource is available only if `manage_clipboard` feature is enabled.
#[cfg(feature = "manage_clipboard")]
#[derive(Default)]
pub struct EguiClipboard {
#[cfg(not(target_arch = "wasm32"))]
clipboard: ThreadLocal<Option<RefCell<ClipboardContext>>>,
#[cfg(target_arch = "wasm32")]
clipboard: String,
}
#[cfg(feature = "manage_clipboard")]
impl EguiClipboard {
/// Sets clipboard contents.
pub fn set_contents(&mut self, contents: &str) {
self.set_contents_impl(contents);
}
/// Gets clipboard contents. Returns [`None`] if clipboard provider is unavailable or returns an error.
pub fn get_contents(&self) -> Option<String> {
self.get_contents_impl()
}
#[cfg(not(target_arch = "wasm32"))]
fn set_contents_impl(&self, contents: &str) {
if let Some(mut clipboard) = self.get() {
if let Err(err) = clipboard.set_contents(contents.to_owned()) {
log::error!("Failed to set clipboard contents: {:?}", err);
}
}
}
#[cfg(target_arch = "wasm32")]
fn set_contents_impl(&mut self, contents: &str) {
self.clipboard = contents.to_owned();
}
#[cfg(not(target_arch = "wasm32"))]
fn get_contents_impl(&self) -> Option<String> {
if let Some(mut clipboard) = self.get() {
match clipboard.get_contents() {
Ok(contents) => return Some(contents),
Err(err) => log::info!("Failed to get clipboard contents: {:?}", err),
}
};
None
}
#[cfg(target_arch = "wasm32")]
#[allow(clippy::unnecessary_wraps)]
fn get_contents_impl(&self) -> Option<String> {
Some(self.clipboard.clone())
}
#[cfg(not(target_arch = "wasm32"))]
fn get(&self) -> Option<RefMut<ClipboardContext>> {
self.clipboard
.get_or(|| {
ClipboardContext::new()
.map(RefCell::new)
.map_err(|err| {
log::info!("Failed to initialize clipboard: {:?}", err);
})
.ok()
})
.as_ref()
.map(|cell| cell.borrow_mut())
}
}
/// Is used for storing Egui shapes. The actual resource is [`HashMap<WindowId, EguiShapes>`].
#[derive(Clone, Default)]
pub struct EguiShapes {
/// Pairs of rectangles and paint commands.
///
/// The field gets populated during the [`EguiStage::UiFrameEnd`] stage and reset during `EguiNode::update`.
pub shapes: Vec<egui::paint::ClippedShape>,
}
/// Is used for storing Egui output. The actual resource is [`HashMap<WindowId, EguiOutput>`].
#[derive(Clone, Default)]
pub struct EguiOutput {
/// The field gets updated during the [`EguiStage::UiFrameEnd`] stage.
pub output: egui::Output,
}
/// A resource for storing `bevy_egui` context.
pub struct EguiContext {
ctx: HashMap<WindowId, egui::CtxRef>,
egui_textures: HashMap<egui::TextureId, Handle<Texture>>,
mouse_position: Option<(f32, f32)>,
}
impl EguiContext {
fn new() -> Self {
Self {
ctx: HashMap::default(),
egui_textures: Default::default(),
mouse_position: Some((0.0, 0.0)),
}
}
/// Egui context of the primary window.
#[track_caller]
pub fn ctx(&self) -> &egui::CtxRef {
        self.ctx.get(&WindowId::primary()).expect("`EguiContext::ctx()` called before the ctx has been initialized. Consider moving your UI system to `CoreStage::Update` or running your system after `EguiSystem::BeginFrame`.")
}
/// Egui context for a specific window.
/// If you want to display UI on a non-primary window,
/// make sure to set up the render graph by calling [`setup_pipeline`].
#[track_caller]
pub fn ctx_for_window(&self, window: WindowId) -> &egui::CtxRef {
        self.ctx
.get(&window)
.ok_or_else(|| format!("window with id {} not found", window))
.unwrap()
}
/// Fallible variant of [`EguiContext::ctx_for_window`]. Make sure to set up the render graph by calling [`setup_pipeline`].
pub fn try_ctx_for_window(&self, window: WindowId) -> Option<&egui::CtxRef> {
self.ctx.get(&window)
}
/// Can accept either a strong or a weak handle.
///
/// You may want to pass a weak handle if you control removing texture assets in your
/// application manually and you don't want to bother with cleaning up textures in egui.
///
/// You'll want to pass a strong handle if a texture is used only in egui and there's no
/// handle copies stored anywhere else.
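    ///
    /// A minimal sketch of registering a texture (the id constant and asset
    /// path are illustrative):
    ///
    /// ```no_run,rust
    /// # use bevy::prelude::*;
    /// # use bevy_egui::EguiContext;
    /// const BEVY_TEXTURE_ID: u64 = 0;
    ///
    /// fn set_my_texture(mut egui_context: ResMut<EguiContext>, asset_server: Res<AssetServer>) {
    ///     let texture_handle = asset_server.load("icon.png");
    ///     egui_context.set_egui_texture(BEVY_TEXTURE_ID, texture_handle);
    /// }
    /// ```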
pub fn set_egui_texture(&mut self, id: u64, texture: Handle<Texture>) {
log::debug!("Set egui texture: {:?}", texture);
self.egui_textures
.insert(egui::TextureId::User(id), texture);
}
/// Removes a texture handle associated with the id.
pub fn remove_egui_texture(&mut self, id: u64) {
let texture_handle = self.egui_textures.remove(&egui::TextureId::User(id));
log::debug!("Remove egui texture: {:?}", texture_handle);
}
    // Called when we receive an event that a texture asset has been removed.
fn remove_texture(&mut self, texture_handle: &Handle<Texture>) {
log::debug!("Removing egui handles: {:?}", texture_handle);
        self.egui_textures
            .retain(|_, texture| texture != texture_handle);
}
}
#[doc(hidden)]
#[derive(Debug, Default, Clone, PartialEq)]
pub struct WindowSize {
physical_width: f32,
physical_height: f32,
scale_factor: f32,
}
impl WindowSize {
fn new(physical_width: f32, physical_height: f32, scale_factor: f32) -> Self {
Self {
physical_width,
physical_height,
scale_factor,
}
}
#[inline]
fn width(&self) -> f32 {
self.physical_width / self.scale_factor
}
#[inline]
fn height(&self) -> f32 {
self.physical_height / self.scale_factor
}
}
/// The names of `bevy_egui` nodes.
pub mod node {
/// The main egui pass.
pub const EGUI_PASS: &str = "egui_pass";
/// Keeps the transform uniform up to date.
pub const EGUI_TRANSFORM: &str = "egui_transform";
}
#[derive(StageLabel, Clone, Hash, Debug, Eq, PartialEq)]
/// The names of `bevy_egui` stages.
pub enum EguiStage {
/// Runs before [`bevy::render::RenderStage::RenderResource`]. This is where we read Egui's output.
UiFrameEnd,
}
#[derive(SystemLabel, Clone, Hash, Debug, Eq, PartialEq)]
/// The names of egui systems.
pub enum EguiSystem {
/// Reads Egui inputs (keyboard, mouse, etc) and writes them into the [`EguiInput`] resource.
///
/// To modify the input, you can hook your system like this:
///
/// `system.after(EguiSystem::ProcessInput).before(EguiSystem::BeginFrame)`.
ProcessInput,
/// Begins the `egui` frame
BeginFrame,
/// Processes the [`EguiOutput`] resource
ProcessOutput,
}
impl Plugin for EguiPlugin {
fn build(&self, app: &mut AppBuilder) {
app.add_stage_before(
RenderStage::RenderResource,
EguiStage::UiFrameEnd,
SystemStage::parallel(),
);
app.add_system_to_stage(
CoreStage::PreUpdate,
process_input
.system()
.label(EguiSystem::ProcessInput)
.after(InputSystem),
);
app.add_system_to_stage(
CoreStage::PreUpdate,
begin_frame
.system()
.label(EguiSystem::BeginFrame)
.after(EguiSystem::ProcessInput),
);
app.add_system_to_stage(
EguiStage::UiFrameEnd,
process_output.system().label(EguiSystem::ProcessOutput),
);
let world = app.world_mut();
world.get_resource_or_insert_with(EguiSettings::default);
world.get_resource_or_insert_with(HashMap::<WindowId, EguiInput>::default);
world.get_resource_or_insert_with(HashMap::<WindowId, EguiOutput>::default);
world.get_resource_or_insert_with(HashMap::<WindowId, WindowSize>::default);
world.get_resource_or_insert_with(HashMap::<WindowId, EguiShapes>::default);
#[cfg(feature = "manage_clipboard")]
world.get_resource_or_insert_with(EguiClipboard::default);
world.insert_resource(EguiContext::new());
let world = world.cell();
let mut pipelines = world
.get_resource_mut::<Assets<PipelineDescriptor>>()
.unwrap();
let msaa = world.get_resource::<Msaa>().unwrap();
let mut shaders = world.get_resource_mut::<Assets<Shader>>().unwrap();
pipelines.set_untracked(
EGUI_PIPELINE_HANDLE,
build_egui_pipeline(&mut shaders, msaa.samples),
);
let mut render_graph = world.get_resource_mut::<RenderGraph>().unwrap();
setup_pipeline(&mut render_graph, &msaa, RenderGraphConfig::default());
}
}
/// Egui's render graph config.
#[allow(missing_docs)]
pub struct RenderGraphConfig {
pub window_id: WindowId,
pub egui_pass: &'static str,
pub main_pass: &'static str,
pub swap_chain_node: &'static str,
pub depth_texture: &'static str,
pub sampled_color_attachment: &'static str,
pub transform_node: &'static str,
}
impl Default for RenderGraphConfig {
fn default() -> Self {
RenderGraphConfig {
window_id: WindowId::primary(),
egui_pass: node::EGUI_PASS,
main_pass: base::node::MAIN_PASS,
swap_chain_node: base::node::PRIMARY_SWAP_CHAIN,
depth_texture: base::node::MAIN_DEPTH_TEXTURE,
sampled_color_attachment: base::node::MAIN_SAMPLED_COLOR_ATTACHMENT,
transform_node: node::EGUI_TRANSFORM,
}
}
}
/// Set up egui render pipeline.
///
/// The pipeline for the primary window will already be set up by the [`EguiPlugin`],
/// so you'll only need to manually call this if you want to use multiple windows.
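///
/// A rough sketch for a hypothetical second window (the node names are
/// illustrative and must match the swap chain, texture and pass nodes you
/// register for that window; see [./examples/two_windows.rs](./examples/two_windows.rs)
/// for a complete setup):
///
/// ```ignore
/// setup_pipeline(
///     &mut render_graph,
///     &msaa,
///     RenderGraphConfig {
///         window_id: second_window_id,
///         egui_pass: "egui_pass_2",
///         main_pass: "second_window_pass",
///         swap_chain_node: "second_window_swap_chain",
///         depth_texture: "second_window_depth_texture",
///         sampled_color_attachment: "second_window_sampled_color_attachment",
///         transform_node: "egui_transform_2",
///     },
/// );
/// ```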
pub fn setup_pipeline(render_graph: &mut RenderGraph, msaa: &Msaa, config: RenderGraphConfig) {
render_graph.add_node(config.egui_pass, EguiNode::new(&msaa, config.window_id));
render_graph
.add_node_edge(config.main_pass, config.egui_pass)
.unwrap();
if let Ok(ui_pass) = render_graph.get_node_id(bevy::ui::node::UI_PASS) {
render_graph
.add_node_edge(ui_pass, config.egui_pass)
.unwrap();
}
render_graph
.add_slot_edge(
config.swap_chain_node,
WindowSwapChainNode::OUT_TEXTURE,
config.egui_pass,
if msaa.samples > 1 {
"color_resolve_target"
} else {
"color_attachment"
},
)
.unwrap();
render_graph
.add_slot_edge(
config.depth_texture,
WindowTextureNode::OUT_TEXTURE,
config.egui_pass,
"depth",
)
.unwrap();
if msaa.samples > 1 {
render_graph
.add_slot_edge(
config.sampled_color_attachment,
WindowSwapChainNode::OUT_TEXTURE,
config.egui_pass,
"color_attachment",
)
.unwrap();
}
render_graph.add_system_node(
config.transform_node,
EguiTransformNode::new(config.window_id),
);
render_graph
.add_node_edge(config.transform_node, config.egui_pass)
.unwrap();
}
fn build_egui_pipeline(shaders: &mut Assets<Shader>, sample_count: u32) -> PipelineDescriptor {
PipelineDescriptor {
primitive: PrimitiveState {
front_face: FrontFace::Cw,
cull_mode: CullMode::None,
..Default::default()
},
depth_stencil: Some(DepthStencilState {
format: TextureFormat::Depth32Float,
depth_write_enabled: true,
depth_compare: CompareFunction::LessEqual,
stencil: StencilState {
front: StencilFaceState::IGNORE,
back: StencilFaceState::IGNORE,
read_mask: 0,
write_mask: 0,
},
bias: DepthBiasState {
constant: 0,
slope_scale: 0.0,
clamp: 0.0,
},
clamp_depth: false,
}),
color_target_states: vec![ColorTargetState {
format: TextureFormat::default(),
color_blend: BlendState {
src_factor: BlendFactor::One,
dst_factor: BlendFactor::OneMinusSrcAlpha,
operation: BlendOperation::Add,
},
alpha_blend: BlendState {
src_factor: BlendFactor::One,
dst_factor: BlendFactor::OneMinusSrcAlpha,
operation: BlendOperation::Add,
},
write_mask: ColorWrite::ALL,
}],
multisample: MultisampleState {
count: sample_count,
mask: !0,
alpha_to_coverage_enabled: false,
},
..PipelineDescriptor::new(ShaderStages {
vertex: shaders.add(Shader::from_glsl(
ShaderStage::Vertex,
if cfg!(target_arch = "wasm32") {
include_str!("egui.es.vert")
} else {
include_str!("egui.vert")
},
)),
fragment: Some(shaders.add(Shader::from_glsl(
ShaderStage::Fragment,
if cfg!(target_arch = "wasm32") {
include_str!("egui.es.frag")
} else {
include_str!("egui.frag")
},
))),
})
}
}
#[cfg(test)]
mod tests {
#[test]
fn test_readme_deps() {
version_sync::assert_markdown_deps_updated!("README.md");
}
}
| 33.899099 | 221 | 0.610131 |
ab66de815a0ba91284e3c42f67f3a008fcb5117e | 259 | use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug)]
pub struct WikiPageData {
pub content_md: String,
pub content_html: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct WikiPage {
pub data: WikiPageData,
}
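// A minimal deserialization sketch, assuming `serde_json` is available; the
// JSON shape is inferred from the field names and is an assumption, not a
// documented API contract:
//
//     let raw = r#"{"data":{"content_md":"# Hi","content_html":"<h1>Hi</h1>"}}"#;
//     let page: WikiPage = serde_json::from_str(raw).unwrap();
//     assert_eq!(page.data.content_md, "# Hi");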
| 19.923077 | 40 | 0.722008 |
33cc21425ebe12449087fa5b94dd048703a74fb1 | 5,950 | // SPDX-License-Identifier: Apache-2.0
// This file is part of Frontier.
//
// Copyright (c) 2020 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::SelfContainedCall;
use frame_support::weights::{DispatchInfo, GetDispatchInfo};
use sp_runtime::{
traits::{
self, DispatchInfoOf, Dispatchable, MaybeDisplay, Member, PostDispatchInfoOf,
SignedExtension, ValidateUnsigned,
},
transaction_validity::{
InvalidTransaction, TransactionSource, TransactionValidity, TransactionValidityError,
},
};
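/// How a checked extrinsic was authenticated: via a regular signed origin,
/// as an unsigned extrinsic, or as a self-contained call that carries and
/// verifies its own signature (e.g. an Ethereum transaction in Frontier).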
#[derive(PartialEq, Eq, Clone, sp_core::RuntimeDebug)]
pub enum CheckedSignature<AccountId, Extra, SelfContainedSignedInfo> {
Signed(AccountId, Extra),
Unsigned,
SelfContained(SelfContainedSignedInfo),
}
/// Definition of something that the external world might want to say; its
/// existence implies that it has been checked and is good, particularly with
/// regards to the signature.
#[derive(PartialEq, Eq, Clone, sp_core::RuntimeDebug)]
pub struct CheckedExtrinsic<AccountId, Call, Extra, SelfContainedSignedInfo> {
	/// Who this purports to be from and the number of extrinsics that have come
	/// before from the same signer, if anyone (note this is not a signature).
pub signed: CheckedSignature<AccountId, Extra, SelfContainedSignedInfo>,
/// The function that should be called.
pub function: Call,
}
impl<AccountId, Call: GetDispatchInfo, Extra, SelfContainedSignedInfo> GetDispatchInfo
for CheckedExtrinsic<AccountId, Call, Extra, SelfContainedSignedInfo>
{
fn get_dispatch_info(&self) -> DispatchInfo {
self.function.get_dispatch_info()
}
}
impl<AccountId, Call, Extra, SelfContainedSignedInfo, Origin> traits::Applyable
for CheckedExtrinsic<AccountId, Call, Extra, SelfContainedSignedInfo>
where
AccountId: Member + MaybeDisplay,
Call: Member
+ Dispatchable<Origin = Origin>
+ SelfContainedCall<SignedInfo = SelfContainedSignedInfo>,
Extra: SignedExtension<AccountId = AccountId, Call = Call>,
Origin: From<Option<AccountId>>,
SelfContainedSignedInfo: Send + Sync + 'static,
{
type Call = Call;
fn validate<U: ValidateUnsigned<Call = Self::Call>>(
&self,
// TODO [#5006;ToDr] should source be passed to `SignedExtension`s?
// Perhaps a change for 2.0 to avoid breaking too much APIs?
source: TransactionSource,
info: &DispatchInfoOf<Self::Call>,
len: usize,
) -> TransactionValidity {
match &self.signed {
CheckedSignature::Signed(id, extra) => {
Extra::validate(extra, id, &self.function, info, len)
}
CheckedSignature::Unsigned => {
let valid = Extra::validate_unsigned(&self.function, info, len)?;
let unsigned_validation = U::validate_unsigned(source, &self.function)?;
Ok(valid.combine_with(unsigned_validation))
}
CheckedSignature::SelfContained(signed_info) => {
self.function.validate_self_contained(&signed_info).ok_or(
TransactionValidityError::Invalid(InvalidTransaction::BadProof),
)?
}
}
}
fn apply<U: ValidateUnsigned<Call = Self::Call>>(
self,
info: &DispatchInfoOf<Self::Call>,
len: usize,
) -> sp_runtime::ApplyExtrinsicResultWithInfo<PostDispatchInfoOf<Self::Call>> {
match self.signed {
CheckedSignature::Signed(id, extra) => {
let pre = Extra::pre_dispatch(extra, &id, &self.function, info, len)?;
let maybe_who = Some(id);
let res = self.function.dispatch(Origin::from(maybe_who));
let post_info = match res {
Ok(info) => info,
Err(err) => err.post_info,
};
Extra::post_dispatch(
pre,
info,
&post_info,
len,
&res.map(|_| ()).map_err(|e| e.error),
)?;
Ok(res)
}
CheckedSignature::Unsigned => {
let pre = Extra::pre_dispatch_unsigned(&self.function, info, len)?;
U::pre_dispatch(&self.function)?;
let maybe_who = None;
let res = self.function.dispatch(Origin::from(maybe_who));
let post_info = match res {
Ok(info) => info,
Err(err) => err.post_info,
};
Extra::post_dispatch(
pre,
info,
&post_info,
len,
&res.map(|_| ()).map_err(|e| e.error),
)?;
Ok(res)
}
CheckedSignature::SelfContained(signed_info) => {
				// If pre-dispatch fails, the block must be considered invalid
self.function
.pre_dispatch_self_contained(&signed_info)
.ok_or(TransactionValidityError::Invalid(
InvalidTransaction::BadProof,
))??;
Ok(self.function.apply_self_contained(signed_info).ok_or(
TransactionValidityError::Invalid(InvalidTransaction::BadProof),
)?)
}
}
}
}
| 39.403974 | 93 | 0.601849 |
e6fbd0fab24359e01c7619ba634c00a7d775c7cf | 1,758 | extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
use std::fmt;
use std::fs;
use std::str;
#[derive(Deserialize, PartialEq)]
struct Coordinate {
x: f64,
y: f64,
z: f64,
}
impl fmt::Display for Coordinate {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
formatter,
"Coordinate {{ x: {:e}, y: {:e}, z: {} }}",
self.x, self.y, self.z
)
}
}
#[derive(Deserialize)]
struct TestStruct {
coordinates: Vec<Coordinate>,
}
fn notify(msg: &str) {
use std::io::Write;
if let Ok(mut stream) = std::net::TcpStream::connect("localhost:9001") {
stream.write_all(msg.as_bytes()).unwrap();
}
}
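/// Parse the JSON document and average all coordinates; this is the measured
/// workload of the benchmark.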
fn calc(s: &str) -> Coordinate {
let jobj: TestStruct = serde_json::from_str(&s).unwrap();
let len = jobj.coordinates.len() as f64;
let mut x = 0_f64;
let mut y = 0_f64;
let mut z = 0_f64;
for coord in &jobj.coordinates {
x += coord.x;
y += coord.y;
z += coord.z;
}
Coordinate {
x: x / len,
y: y / len,
z: z / len,
}
}
fn main() {
let right = Coordinate {
x: 2.0,
y: 0.5,
z: 0.25,
};
for v in &[
"{\"coordinates\":[{\"x\":2.0,\"y\":0.5,\"z\":0.25}]}",
"{\"coordinates\":[{\"y\":0.5,\"x\":2.0,\"z\":0.25}]}",
] {
let left = calc(v);
if left != right {
eprintln!("{} != {}", left, right);
std::process::exit(-1);
}
}
let s = fs::read_to_string("/tmp/1.json").unwrap();
notify(&format!("Rust (Serde Typed)\t{}", std::process::id()));
let results = calc(&s);
notify("stop");
println!("{}", results);
}
| 20.44186 | 76 | 0.498862 |
1c702eee882ba84b9cf4c2bcd0271cd0834a5feb | 2,926 | use super::*;
const D1: [u8; XPRV_SIZE] = [
0xf8, 0xa2, 0x92, 0x31, 0xee, 0x38, 0xd6, 0xc5, 0xbf, 0x71, 0x5d, 0x5b, 0xac, 0x21, 0xc7, 0x50,
0x57, 0x7a, 0xa3, 0x79, 0x8b, 0x22, 0xd7, 0x9d, 0x65, 0xbf, 0x97, 0xd6, 0xfa, 0xde, 0xa1, 0x5a,
0xdc, 0xd1, 0xee, 0x1a, 0xbd, 0xf7, 0x8b, 0xd4, 0xbe, 0x64, 0x73, 0x1a, 0x12, 0xde, 0xb9, 0x4d,
0x36, 0x71, 0x78, 0x41, 0x12, 0xeb, 0x6f, 0x36, 0x4b, 0x87, 0x18, 0x51, 0xfd, 0x1c, 0x9a, 0x24,
0x73, 0x84, 0xdb, 0x9a, 0xd6, 0x00, 0x3b, 0xbd, 0x08, 0xb3, 0xb1, 0xdd, 0xc0, 0xd0, 0x7a, 0x59,
0x72, 0x93, 0xff, 0x85, 0xe9, 0x61, 0xbf, 0x25, 0x2b, 0x33, 0x12, 0x62, 0xed, 0xdf, 0xad, 0x0d,
];
const D1_H0: [u8; XPRV_SIZE] = [
0x60, 0xd3, 0x99, 0xda, 0x83, 0xef, 0x80, 0xd8, 0xd4, 0xf8, 0xd2, 0x23, 0x23, 0x9e, 0xfd, 0xc2,
0xb8, 0xfe, 0xf3, 0x87, 0xe1, 0xb5, 0x21, 0x91, 0x37, 0xff, 0xb4, 0xe8, 0xfb, 0xde, 0xa1, 0x5a,
0xdc, 0x93, 0x66, 0xb7, 0xd0, 0x03, 0xaf, 0x37, 0xc1, 0x13, 0x96, 0xde, 0x9a, 0x83, 0x73, 0x4e,
0x30, 0xe0, 0x5e, 0x85, 0x1e, 0xfa, 0x32, 0x74, 0x5c, 0x9c, 0xd7, 0xb4, 0x27, 0x12, 0xc8, 0x90,
0x60, 0x87, 0x63, 0x77, 0x0e, 0xdd, 0xf7, 0x72, 0x48, 0xab, 0x65, 0x29, 0x84, 0xb2, 0x1b, 0x84,
0x97, 0x60, 0xd1, 0xda, 0x74, 0xa6, 0xf5, 0xbd, 0x63, 0x3c, 0xe4, 0x1a, 0xdc, 0xee, 0xf0, 0x7a,
];
const MSG: &'static [u8] = b"Hello World";
const D1_H0_SIGNATURE: [u8; 64] = [
0x90, 0x19, 0x4d, 0x57, 0xcd, 0xe4, 0xfd, 0xad, 0xd0, 0x1e, 0xb7, 0xcf, 0x16, 0x17, 0x80, 0xc2,
0x77, 0xe1, 0x29, 0xfc, 0x71, 0x35, 0xb9, 0x77, 0x79, 0xa3, 0x26, 0x88, 0x37, 0xe4, 0xcd, 0x2e,
0x94, 0x44, 0xb9, 0xbb, 0x91, 0xc0, 0xe8, 0x4d, 0x23, 0xbb, 0xa8, 0x70, 0xdf, 0x3c, 0x4b, 0xda,
0x91, 0xa1, 0x10, 0xef, 0x73, 0x56, 0x38, 0xfa, 0x7a, 0x34, 0xea, 0x20, 0x46, 0xd4, 0xbe, 0x04,
];
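// An XPRV here is 96 bytes: a 64-byte extended secret key followed by a
// 32-byte chain code, which is why the comparison below splits at byte 64.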
fn compare_xprv(xprv: &[u8], expected_xprv: &[u8]) {
assert_eq!(
xprv[64..].to_vec(),
expected_xprv[64..].to_vec(),
"chain code"
);
assert_eq!(
xprv[..64].to_vec(),
expected_xprv[..64].to_vec(),
"extended key"
);
}
fn derive_xprv_eq(parent_xprv: &XPrv, idx: DerivationIndex, expected_xprv: [u8; 96]) {
let child_xprv = parent_xprv.derive(DerivationScheme::V2, idx);
compare_xprv(child_xprv.as_ref(), &expected_xprv);
}
fn do_sign(xprv: &XPrv, expected_signature: &[u8]) {
let signature: Signature<Vec<u8>> = xprv.sign(MSG);
assert_eq!(signature.as_ref(), expected_signature);
}
#[test]
fn xprv_sign() {
let prv = XPrv::from_bytes_verified(D1_H0).unwrap();
do_sign(&prv, &D1_H0_SIGNATURE);
}
#[test]
fn verify_signature() {
let prv = XPrv::from_bytes_verified(D1_H0).unwrap();
let xpub = prv.public();
let sig: Signature<u8> = Signature::from_slice(&D1_H0_SIGNATURE).unwrap();
assert_eq!(xpub.verify(MSG, &sig), true)
}
#[test]
fn xprv_derive() {
let prv = XPrv::from_bytes_verified(D1).unwrap();
derive_xprv_eq(&prv, 0x80000000, D1_H0);
}
| 40.638889 | 99 | 0.637389 |
fe1207b0bc2bd0431959c2948fe18b73cd3cf8ec | 8,634 | use std::fmt;
use chrono::{DateTime, Utc};
use r2d2::Pool;
use r2d2_sqlite::SqliteConnectionManager;
use rand::prelude::IteratorRandom;
use rusqlite::{
ffi::{Error as SqliteFfiError, ErrorCode},
params, Error as SqliteError,
};
use tap::Pipe;
use thiserror::Error;
pub struct Quote {
pub quote: String,
pub username: String,
pub when: Option<DateTime<Utc>>,
pub key: Option<String>,
}
impl fmt::Display for Quote {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "\"{}\" - @{}", self.quote, self.username)?;
if let Some(when) = self.when {
write!(f, ", {}", when.format("%d %b %Y"))?;
}
if let Some(key) = &self.key {
write!(f, " (#{})", key)?;
}
Ok(())
}
}
/// Storage of quotes in an SQLite3 database.
#[derive(Debug, Clone)]
pub struct QuotesStore {
conn_pool: Pool<SqliteConnectionManager>,
}
impl QuotesStore {
/// Create a `QuotesStore` with a connection to a database.
pub fn new(conn_pool: Pool<SqliteConnectionManager>) -> Self {
Self { conn_pool }
}
pub fn add_quote_unkeyed(
&self,
channel: &str,
username: &str,
text: &str,
time: DateTime<Utc>,
) -> Result<(), QuotesError> {
let conn = self.conn_pool.get()?;
match conn.execute(
r#"
INSERT OR ROLLBACK INTO quotes (channel, username, quote, time)
VALUES (?1, ?2, ?3, ?4);
"#,
params![channel, username, text, time],
) {
Ok(_) => Ok(()),
Err(SqliteError::SqliteFailure(
SqliteFfiError {
code: ErrorCode::ConstraintViolation,
..
},
_,
)) => Err(QuotesError::DuplicateQuote {
channel: channel.into(),
username: username.into(),
text: text.into(),
}),
Err(err) => Err(err.into()),
}
}
pub fn add_quote_keyed(
&self,
channel: &str,
username: &str,
key: &str,
text: &str,
time: DateTime<Utc>,
) -> Result<(), QuotesError> {
let conn = self.conn_pool.get()?;
match conn.execute(
r#"
INSERT OR ROLLBACK INTO quotes (channel, username, key, quote, time)
VALUES (?1, ?2, ?3, ?4, ?5);
"#,
params![channel, username, key, text, time],
) {
Ok(_) => Ok(()),
Err(SqliteError::SqliteFailure(
SqliteFfiError {
code: ErrorCode::ConstraintViolation,
..
},
_,
)) => {
// If we've failed due to a constraint violation here, it could either be because
// the key is already used for another quote or because the text of the quote
// already exists for this user. We'll run a query for any quotes with the provided
// key to determine which one it is.
let mut key_stmt = conn.prepare(
r#"
SELECT key
FROM quotes
WHERE key = ?1
LIMIT 1;
"#,
)?;
let mut same_key = key_stmt.query(params![key])?;
if same_key.next()?.is_some() {
QuotesError::DuplicateKey {
channel: channel.into(),
key: key.into(),
}
} else {
QuotesError::DuplicateQuote {
channel: channel.into(),
username: username.into(),
text: text.into(),
}
}
.pipe(Err)
}
Err(err) => Err(err.into()),
}
}
pub fn get_quote_keyed(&self, channel: &str, key: &str) -> Result<Option<Quote>, QuotesError> {
let conn = self.conn_pool.get()?;
let mut stmt = conn.prepare(
r#"
SELECT channel, quote, username, time, key
FROM quotes
WHERE channel = ?1 AND key = ?2
LIMIT 1;
"#,
)?;
let mut rows = stmt.query(params![channel, key])?;
if let Some(row) = rows.next()? {
Quote {
quote: row.get(1)?,
username: row.get(2)?,
when: row.get(3)?,
key: row.get(4)?,
}
.pipe(Some)
.pipe(Ok)
} else {
Ok(None)
}
}
pub fn get_quote_random(&self, channel: &str) -> Result<Option<Quote>, QuotesError> {
let conn = self.conn_pool.get()?;
let mut stmt = conn.prepare(
r#"
SELECT channel, quote, username, time, key
FROM quotes
WHERE channel = ?1;
"#,
)?;
let all = stmt
.query_map(params![channel], |row| {
Quote {
quote: row.get(1)?,
username: row.get(2)?,
when: row.get(3)?,
key: row.get(4)?,
}
.pipe(Ok)
})?
.collect::<Result<Vec<_>, _>>()?;
all.into_iter().choose(&mut rand::thread_rng()).pipe(Ok)
}
}
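// A minimal usage sketch (pool construction mirrors the test helper below;
// the channel, user, and key names are placeholders):
//
//     let store = QuotesStore::new(conn_pool);
//     store.add_quote_keyed("somechannel", "someuser", "gg", "good game", Utc::now())?;
//     if let Some(quote) = store.get_quote_keyed("somechannel", "gg")? {
//         println!("{}", quote); // "good game" - @someuser, <date> (#gg)
//     }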
#[derive(Debug, Error)]
pub enum QuotesError {
#[error("duplicate quote from @{username} in channel {channel}: {text}")]
DuplicateQuote {
channel: String,
username: String,
text: String,
},
#[error("duplicate quote key #{key} in channel {channel}")]
DuplicateKey { channel: String, key: String },
#[error("rusqlite error: {0}")]
Rusqlite(#[from] rusqlite::Error),
#[error("r2d2 error: {0}")]
R2d2(#[from] r2d2::Error),
}
#[cfg(test)]
mod tests {
use std::ops::DerefMut;
use tempfile::{tempdir, TempDir};
use super::*;
fn storage() -> (TempDir, QuotesStore) {
let db_dir = tempdir().expect("creating a temporary directory should succeed");
let db_path = db_dir.path().join("db.sqlite3");
let manager = SqliteConnectionManager::file(&db_path);
let conn_pool = Pool::new(manager).expect("creating a connection pool should succeed");
let mut conn = conn_pool
.get()
.expect("getting a connection from the pool should succeed");
crate::db::migrations::runner()
.run(conn.deref_mut())
.expect("running migrations should succeed");
(db_dir, QuotesStore::new(conn_pool))
}
// #[test]
// fn set_command() {
// let (_db_dir, commands) = storage();
//
// let response = commands
// .get_command("asdf", "command")
// .expect("attempting to get the command should succeed");
//
// assert!(
// response.is_none(),
// "no response should be returned if the command doesn't exist"
// );
//
// commands
// .set_command("asdf", "command", "this is the response to the command")
// .expect("setting the command should succeed");
//
// let response2 = commands
// .get_command("asdf", "command")
// .expect("attempting to get the command should succeed");
//
// assert!(
// response2.is_some(),
// "a response should be returned if the command does exist"
// );
// }
//
// #[test]
// fn update_command() {
// let (_db_dir, commands) = storage();
//
// commands
// .set_command(
// "qwerty",
// "updatethis",
// "this is the response to the command",
// )
// .expect("setting the command the first time should succeed");
//
// commands
// .set_command("qwerty", "updatethis", "now i've changed the response")
// .expect("setting the command again should succeed in updating it");
//
// let response = commands
// .get_command("qwerty", "updatethis")
// .expect("attempting to get the command should succeed");
//
// assert_eq!(
// response.expect("response should be Some"),
// "now i've changed the response".to_owned(),
// "response should have been updated"
// );
// }
}
| 29.467577 | 99 | 0.480774 |
2272e9123777468d39aa699aab117f2008b5f472 | 1,710 | #![deny(warnings, rust_2018_idioms)]
#![forbid(unsafe_code)]
pub use linkerd_dns_name::InvalidName;
use std::{fmt, ops::Deref, str::FromStr, sync::Arc};
/// An endpoint's identity.
#[derive(Clone, Eq, PartialEq, Hash)]
pub struct Name(Arc<linkerd_dns_name::Name>);
/// A newtype for local server identities.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct LocalId(pub Name);
// === impl Name ===
impl From<linkerd_dns_name::Name> for Name {
fn from(n: linkerd_dns_name::Name) -> Self {
Name(Arc::new(n))
}
}
impl FromStr for Name {
type Err = InvalidName;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s.ends_with('.') {
return Err(InvalidName); // SNI hostnames are implicitly absolute.
}
linkerd_dns_name::Name::from_str(s).map(|n| Name(Arc::new(n)))
}
}
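// Parsing sketch (the hostname is illustrative): trailing dots are rejected
// because SNI names are implicitly absolute.
//
//     let name: Name = "web.ns.svc.cluster.local".parse().unwrap();
//     assert!("web.ns.svc.cluster.local.".parse::<Name>().is_err());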
impl Deref for Name {
type Target = linkerd_dns_name::Name;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl fmt::Debug for Name {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
fmt::Debug::fmt(&self.0, f)
}
}
impl fmt::Display for Name {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
fmt::Display::fmt(&self.0, f)
}
}
impl From<LocalId> for Name {
fn from(LocalId(name): LocalId) -> Name {
name
}
}
// === impl LocalId ===
impl From<Name> for LocalId {
fn from(n: Name) -> Self {
Self(n)
}
}
impl Deref for LocalId {
type Target = Name;
fn deref(&self) -> &Name {
&self.0
}
}
impl fmt::Display for LocalId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
}
}
| 20.853659 | 78 | 0.582456 |
79e67589c877712bc966e10513e25cf71fed8706 | 108 | #[macro_use]
extern crate peroxide;
#[allow(unused_imports)]
use peroxide::fuga::*;
mod o3;
mod dataframe;
| 13.5 | 24 | 0.731481 |
fb47aa31435d6cf98d6c9d3c0cb9e809a456da30 | 1,714 | /*
Copyright 2017 Dennis Vesterlund
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
extern crate getopts;
use getopts::Options;
use std::env;
mod listener;
mod writer;
fn print_usage(program: &str, opts: Options) {
let brief = format!("Usage: {} FILE [options]", program);
print!("{}", opts.usage(&brief));
}
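// Typical invocations (the binary name is whatever this crate builds to):
//   <bin> -l 0.0.0.0 8888    # listen for an incoming connection
//   <bin> 127.0.0.1 8888     # connect and send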
fn main() {
let args: Vec<String> = env::args().collect();
let program = args[0].clone();
let mut listener: bool = false;
    let mut address = String::from("0.0.0.0");
let mut port = String::from("8888");
let mut opts = Options::new();
opts.optflag("l", "listen", "Listen on port ");
opts.optflag("h", "help", "Print this help menu");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
        Err(e) => panic!("{}", e),
};
if matches.opt_present("h") {
print_usage(&program, opts);
return;
}
if matches.opt_present("l") {
listener = true;
}
if matches.free.len() == 2 {
        address = matches.free[0].clone();
port = matches.free[1].clone();
};
if listener {
        listener::listen(&address, &port);
} else {
        writer::write(&address, &port);
}
}
| 25.969697 | 75 | 0.616103 |
ddda87aeb5e52964185f1ba4314e4d313e37458b | 28,123 | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use std::mem;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
#[cfg(not(feature = "prost-codec"))]
use kvproto::cdcpb::*;
#[cfg(feature = "prost-codec")]
use kvproto::cdcpb::{
event::{
row::OpType as EventRowOpType, Entries as EventEntries, Error as EventError,
Event as Event_oneof_event, LogType as EventLogType, Row as EventRow,
},
ChangeDataEvent, Event,
};
use futures::sync::mpsc::*;
use kvproto::metapb::{Region, RegionEpoch};
use kvproto::raft_cmdpb::{AdminCmdType, AdminRequest, AdminResponse, CmdType, Request};
use raftstore::store::util::compare_region_epoch;
use raftstore::Error as RaftStoreError;
use resolved_ts::Resolver;
use tikv::storage::mvcc::{Lock, LockType, WriteRef, WriteType};
use tikv::storage::txn::TxnEntry;
use tikv_util::collections::HashMap;
use txn_types::{Key, TimeStamp};
use crate::Error;
static DOWNSTREAM_ID_ALLOC: AtomicUsize = AtomicUsize::new(0);
/// A unique identifier of a Downstream.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct DownstreamID(usize);
impl DownstreamID {
pub fn new() -> DownstreamID {
DownstreamID(DOWNSTREAM_ID_ALLOC.fetch_add(1, Ordering::SeqCst))
}
}
#[derive(Clone)]
pub struct Downstream {
// TODO: include cdc request.
/// A unique identifier of the Downstream.
pub id: DownstreamID,
    // The IP address of the downstream.
peer: String,
region_epoch: RegionEpoch,
sink: UnboundedSender<ChangeDataEvent>,
}
impl Downstream {
    /// Create a Downstream.
///
/// peer is the address of the downstream.
/// sink sends data to the downstream.
pub fn new(
peer: String,
region_epoch: RegionEpoch,
sink: UnboundedSender<ChangeDataEvent>,
) -> Downstream {
Downstream {
id: DownstreamID::new(),
peer,
sink,
region_epoch,
}
}
fn sink(&self, change_data: ChangeDataEvent) {
if self.sink.unbounded_send(change_data).is_err() {
error!("send event failed"; "downstream" => %self.peer);
}
}
}
#[derive(Default)]
struct Pending {
// Batch of RaftCommand observed from raftstore
// TODO add multi_batch once CDC observer is ready
multi_batch: (),
downstreams: Vec<Downstream>,
scan: Vec<(DownstreamID, Vec<Option<TxnEntry>>)>,
}
/// A CDC delegate of a raftstore region peer.
///
/// It converts raft commands into CDC events and broadcasts them to downstreams.
/// It also tracks transactions on the fly in order to compute the resolved ts.
pub struct Delegate {
pub region_id: u64,
region: Option<Region>,
pub downstreams: Vec<Downstream>,
pub resolver: Option<Resolver>,
pending: Option<Pending>,
enabled: Arc<AtomicBool>,
failed: bool,
}
impl Delegate {
    /// Create a Delegate for the given region.
pub fn new(region_id: u64) -> Delegate {
Delegate {
region_id,
downstreams: Vec::new(),
resolver: None,
region: None,
pending: Some(Pending::default()),
enabled: Arc::new(AtomicBool::new(true)),
failed: false,
}
}
/// Returns a shared flag.
    /// True if there are some active downstreams subscribed to the region.
    /// False if all downstreams have unsubscribed.
pub fn enabled(&self) -> Arc<AtomicBool> {
self.enabled.clone()
}
pub fn subscribe(&mut self, downstream: Downstream) {
if let Some(region) = self.region.as_ref() {
if let Err(e) = compare_region_epoch(
&downstream.region_epoch,
region,
false, /* check_conf_ver */
true, /* check_ver */
true, /* include_region */
) {
let err = Error::Request(e.into());
let change_data_error = self.error_event(err);
downstream.sink(change_data_error);
return;
}
self.downstreams.push(downstream);
} else {
self.pending.as_mut().unwrap().downstreams.push(downstream);
}
}
pub fn unsubscribe(&mut self, id: DownstreamID, err: Option<Error>) -> bool {
let change_data_error = err.map(|err| self.error_event(err));
let downstreams = if self.pending.is_some() {
&mut self.pending.as_mut().unwrap().downstreams
} else {
&mut self.downstreams
};
downstreams.retain(|d| {
if d.id == id {
if let Some(change_data_error) = change_data_error.clone() {
d.sink(change_data_error);
}
}
d.id != id
});
let is_last = self.downstreams.is_empty();
if is_last {
self.enabled.store(false, Ordering::SeqCst);
}
is_last
}
fn error_event(&self, err: Error) -> ChangeDataEvent {
let mut change_data_event = Event::default();
let mut cdc_err = EventError::default();
let mut err = err.extract_error_header();
if err.has_region_not_found() {
let region_not_found = err.take_region_not_found();
cdc_err.set_region_not_found(region_not_found);
} else if err.has_not_leader() {
let not_leader = err.take_not_leader();
cdc_err.set_not_leader(not_leader);
} else if err.has_epoch_not_match() {
let epoch_not_match = err.take_epoch_not_match();
cdc_err.set_epoch_not_match(epoch_not_match);
} else {
panic!(
"region met unknown error region_id: {}, error: {:?}",
self.region_id, err
);
}
change_data_event.event = Some(Event_oneof_event::Error(cdc_err));
change_data_event.region_id = self.region_id;
let mut change_data = ChangeDataEvent::default();
change_data.mut_events().push(change_data_event);
change_data
}
/// Fail the delegate
///
/// This means the region has met an unrecoverable error for CDC.
/// It broadcasts errors to all downstream and stops.
pub fn fail(&mut self, err: Error) {
// Stop observe further events.
self.enabled.store(false, Ordering::SeqCst);
info!("region met error";
"region_id" => self.region_id, "error" => ?err);
let change_data = self.error_event(err);
self.broadcast(change_data);
// Mark this delegate has failed.
self.failed = true;
}
pub fn has_failed(&self) -> bool {
self.failed
}
fn broadcast(&self, change_data: ChangeDataEvent) {
let downstreams = if self.pending.is_some() {
&self.pending.as_ref().unwrap().downstreams
} else {
&self.downstreams
};
for d in downstreams {
d.sink(change_data.clone());
}
}
    /// Install a resolver and notify downstreams that this region is ready to serve.
pub fn on_region_ready(&mut self, resolver: Resolver, region: Region) {
assert!(
self.resolver.is_none(),
"region resolver should not be ready"
);
self.resolver = Some(resolver);
self.region = Some(region);
if let Some(pending) = self.pending.take() {
// Re-subscribe pending downstreams.
for downstream in pending.downstreams {
self.subscribe(downstream);
}
for (downstream_id, entries) in pending.scan {
self.on_scan(downstream_id, entries);
}
// TODO iter multi_batch once CDC observer is ready.
// for batch in pending.multi_batch {
// self.on_batch(batch);
// }
}
info!("region is ready"; "region_id" => self.region_id);
}
    /// Try to advance and broadcast the resolved ts.
pub fn on_min_ts(&mut self, min_ts: TimeStamp) {
if self.resolver.is_none() {
info!("region resolver not ready";
"region_id" => self.region_id, "min_ts" => min_ts);
return;
}
info!("try to advance ts"; "region_id" => self.region_id);
let resolver = self.resolver.as_mut().unwrap();
let resolved_ts = match resolver.resolve(min_ts) {
Some(rts) => rts,
None => return,
};
info!("resolved ts updated";
"region_id" => self.region_id, "resolved_ts" => resolved_ts);
let mut change_data_event = Event::default();
change_data_event.region_id = self.region_id;
change_data_event.event = Some(Event_oneof_event::ResolvedTs(resolved_ts.into_inner()));
let mut change_data = ChangeDataEvent::default();
change_data.mut_events().push(change_data_event);
self.broadcast(change_data);
}
// TODO fill on_batch when CDC observer is ready.
pub fn on_batch(&mut self, _batch: () /* CmdBatch */) {
unimplemented!()
}
pub fn on_scan(&mut self, downstream_id: DownstreamID, entries: Vec<Option<TxnEntry>>) {
if let Some(pending) = self.pending.as_mut() {
pending.scan.push((downstream_id, entries));
return;
}
let d = if let Some(d) = self.downstreams.iter_mut().find(|d| d.id == downstream_id) {
d
} else {
warn!("downstream not found"; "downstream_id" => ?downstream_id);
return;
};
let mut rows = Vec::with_capacity(entries.len());
for entry in entries {
match entry {
Some(TxnEntry::Prewrite { default, lock }) => {
let mut row = EventRow::default();
let skip = decode_lock(lock.0, &lock.1, &mut row);
if skip {
continue;
}
decode_default(default.1, &mut row);
rows.push(row);
}
Some(TxnEntry::Commit { default, write }) => {
let mut row = EventRow::default();
let skip = decode_write(write.0, &write.1, &mut row);
if skip {
continue;
}
decode_default(default.1, &mut row);
// This type means the row is self-contained, it has,
// 1. start_ts
// 2. commit_ts
// 3. key
// 4. value
if row.get_type() == EventLogType::Rollback {
                        // We don't need to send rollbacks downstream,
                        // because the downstream does not need rollbacks to
                        // clean up prewrites: it drops all previously stashed data.
continue;
}
set_event_row_type(&mut row, EventLogType::Committed);
rows.push(row);
}
None => {
let mut row = EventRow::default();
                    // This type means the scan has finished.
set_event_row_type(&mut row, EventLogType::Initialized);
rows.push(row);
}
}
}
let mut event_entries = EventEntries::default();
event_entries.entries = rows.into();
let mut change_data_event = Event::default();
change_data_event.region_id = self.region_id;
change_data_event.event = Some(Event_oneof_event::Entries(event_entries));
let mut change_data = ChangeDataEvent::default();
change_data.mut_events().push(change_data_event);
d.sink(change_data);
}
fn sink_data(&mut self, index: u64, requests: Vec<Request>) {
let mut rows = HashMap::default();
for mut req in requests {
// CDC cares about put requests only.
if req.get_cmd_type() != CmdType::Put {
// Do not log delete requests because they are issued by GC
// frequently.
if req.get_cmd_type() != CmdType::Delete {
debug!(
"skip other command";
"region_id" => self.region_id,
"command" => ?req,
);
}
continue;
}
let mut put = req.take_put();
match put.cf.as_str() {
"write" => {
let mut row = EventRow::default();
let skip = decode_write(put.take_key(), put.get_value(), &mut row);
if skip {
continue;
}
// In order to advance resolved ts,
// we must untrack inflight txns if they are committed.
assert!(self.resolver.is_some(), "region resolver should be ready");
let resolver = self.resolver.as_mut().unwrap();
let commit_ts = if row.commit_ts == 0 {
None
} else {
Some(row.commit_ts)
};
resolver.untrack_lock(
row.start_ts.into(),
commit_ts.map(Into::into),
row.key.clone(),
);
let r = rows.insert(row.key.clone(), row);
assert!(r.is_none());
}
"lock" => {
let mut row = EventRow::default();
let skip = decode_lock(put.take_key(), put.get_value(), &mut row);
if skip {
continue;
}
let occupied = rows.entry(row.key.clone()).or_default();
if !occupied.value.is_empty() {
assert!(row.value.is_empty());
let mut value = vec![];
mem::swap(&mut occupied.value, &mut value);
row.value = value;
}
// In order to compute resolved ts,
// we must track inflight txns.
assert!(self.resolver.is_some(), "region resolver should be ready");
let resolver = self.resolver.as_mut().unwrap();
resolver.track_lock(row.start_ts.into(), row.key.clone());
*occupied = row;
}
"" | "default" => {
let key = Key::from_encoded(put.take_key()).truncate_ts().unwrap();
let row = rows.entry(key.to_raw().unwrap()).or_default();
decode_default(put.take_value(), row);
}
other => {
panic!("invalid cf {}", other);
}
}
}
let mut entries = Vec::with_capacity(rows.len());
for (_, v) in rows {
entries.push(v);
}
let mut event_entries = EventEntries::default();
event_entries.entries = entries.into();
let mut change_data_event = Event::default();
change_data_event.region_id = self.region_id;
change_data_event.index = index;
change_data_event.event = Some(Event_oneof_event::Entries(event_entries));
let mut change_data = ChangeDataEvent::default();
change_data.mut_events().push(change_data_event);
self.broadcast(change_data);
}
fn sink_admin(&mut self, request: AdminRequest, mut response: AdminResponse) {
let store_err = match request.get_cmd_type() {
AdminCmdType::Split => RaftStoreError::EpochNotMatch(
"split".to_owned(),
vec![
response.mut_split().take_left(),
response.mut_split().take_right(),
],
),
AdminCmdType::BatchSplit => RaftStoreError::EpochNotMatch(
"batchsplit".to_owned(),
response.mut_splits().take_regions().into(),
),
AdminCmdType::PrepareMerge
| AdminCmdType::CommitMerge
| AdminCmdType::RollbackMerge => {
RaftStoreError::EpochNotMatch("merge".to_owned(), vec![])
}
_ => return,
};
let err = Error::Request(store_err.into());
self.fail(err);
}
}
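/// Sets the row's log type, abstracting over the field-name difference
/// between the prost and protobuf codecs.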
fn set_event_row_type(row: &mut EventRow, ty: EventLogType) {
#[cfg(feature = "prost-codec")]
{
row.r#type = ty.into();
}
#[cfg(not(feature = "prost-codec"))]
{
row.r_type = ty;
}
}
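/// Decodes a record from the write CF into `row`.
///
/// Returns `true` if the record is of a write type CDC does not replicate
/// and should be skipped.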
fn decode_write(key: Vec<u8>, value: &[u8], row: &mut EventRow) -> bool {
let write = WriteRef::parse(value).unwrap().to_owned();
let (op_type, r_type) = match write.write_type {
WriteType::Put => (EventRowOpType::Put, EventLogType::Commit),
WriteType::Delete => (EventRowOpType::Delete, EventLogType::Commit),
WriteType::Rollback => (EventRowOpType::Unknown, EventLogType::Rollback),
other => {
debug!("skip write record"; "write" => ?other);
return true;
}
};
let key = Key::from_encoded(key);
let commit_ts = if write.write_type == WriteType::Rollback {
0
} else {
key.decode_ts().unwrap().into_inner()
};
row.start_ts = write.start_ts.into_inner();
row.commit_ts = commit_ts;
row.key = key.truncate_ts().unwrap().to_raw().unwrap();
row.op_type = op_type.into();
set_event_row_type(row, r_type);
if let Some(value) = write.short_value {
row.value = value;
}
false
}
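/// Decodes a record from the lock CF into `row`.
///
/// Returns `true` if the lock is of a type CDC does not replicate (e.g. a
/// pessimistic lock) and should be skipped.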
fn decode_lock(key: Vec<u8>, value: &[u8], row: &mut EventRow) -> bool {
let lock = Lock::parse(value).unwrap();
let op_type = match lock.lock_type {
LockType::Put => EventRowOpType::Put,
LockType::Delete => EventRowOpType::Delete,
other => {
info!("skip lock record";
"type" => ?other,
"start_ts" => ?lock.ts,
"for_update_ts" => ?lock.for_update_ts);
return true;
}
};
let key = Key::from_encoded(key);
row.start_ts = lock.ts.into_inner();
row.key = key.to_raw().unwrap();
row.op_type = op_type.into();
set_event_row_type(row, EventLogType::Prewrite);
if let Some(value) = lock.short_value {
row.value = value;
}
false
}
fn decode_default(value: Vec<u8>, row: &mut EventRow) {
    if !value.is_empty() {
        // `value` is already owned, so move it instead of copying.
        row.value = value;
    }
}
#[cfg(test)]
mod tests {
use super::*;
use engine::rocks::*;
use engine_rocks::{RocksEngine, RocksSnapshot};
use engine_traits::Snapshot;
use futures::{Future, Stream};
use kvproto::errorpb::Error as ErrorHeader;
use kvproto::metapb::Region;
use kvproto::raft_cmdpb::{RaftCmdRequest, RaftCmdResponse, Response};
use kvproto::raft_serverpb::RaftMessage;
use raftstore::router::RaftStoreRouter;
use raftstore::store::{Callback, CasualMessage, ReadResponse, RegionSnapshot, SignificantMsg};
use raftstore::Result as RaftStoreResult;
use std::cell::Cell;
use std::sync::Arc;
use tikv::server::RaftKv;
use tikv::storage::mvcc::test_util::*;
use tikv::storage::mvcc::tests::*;
use tikv_util::mpsc::{bounded, Sender as UtilSender};
// TODO add test_txn once cdc observer is ready.
// https://github.com/overvenus/tikv/blob/447d10ae80b5b7fc58a4bef4631874a11237fdcf/components/cdc/src/delegate.rs#L615-L701
#[test]
fn test_error() {
let region_id = 1;
let mut region = Region::default();
region.set_id(region_id);
region.mut_peers().push(Default::default());
region.mut_region_epoch().set_version(2);
region.mut_region_epoch().set_conf_ver(2);
let region_epoch = region.get_region_epoch().clone();
let (sink, events) = unbounded();
let mut delegate = Delegate::new(region_id);
delegate.subscribe(Downstream::new(String::new(), region_epoch, sink));
let enabled = delegate.enabled();
assert!(enabled.load(Ordering::SeqCst));
let mut resolver = Resolver::new();
resolver.init();
delegate.on_region_ready(resolver, region);
let events_wrap = Cell::new(Some(events));
let receive_error = || {
let (change_data, events) = events_wrap
.replace(None)
.unwrap()
.into_future()
.wait()
.unwrap();
events_wrap.set(Some(events));
let mut change_data = change_data.unwrap();
assert_eq!(change_data.events.len(), 1);
let change_data_event = &mut change_data.events[0];
let event = change_data_event.event.take().unwrap();
match event {
Event_oneof_event::Error(err) => err,
_ => panic!("unknown event"),
}
};
let mut err_header = ErrorHeader::default();
err_header.set_not_leader(Default::default());
delegate.fail(Error::Request(err_header));
let err = receive_error();
assert!(err.has_not_leader());
// Enable is disabled by any error.
assert!(!enabled.load(Ordering::SeqCst));
let mut err_header = ErrorHeader::default();
err_header.set_region_not_found(Default::default());
delegate.fail(Error::Request(err_header));
let err = receive_error();
assert!(err.has_region_not_found());
let mut err_header = ErrorHeader::default();
err_header.set_epoch_not_match(Default::default());
delegate.fail(Error::Request(err_header));
let err = receive_error();
assert!(err.has_epoch_not_match());
// Split
let mut region = Region::default();
region.set_id(1);
let mut request = AdminRequest::default();
request.set_cmd_type(AdminCmdType::Split);
let mut response = AdminResponse::default();
response.mut_split().set_left(region.clone());
delegate.sink_admin(request, response);
let mut err = receive_error();
assert!(err.has_epoch_not_match());
err.take_epoch_not_match()
.current_regions
.into_iter()
.find(|r| r.get_id() == 1)
.unwrap();
let mut request = AdminRequest::default();
request.set_cmd_type(AdminCmdType::BatchSplit);
let mut response = AdminResponse::default();
response.mut_splits().set_regions(vec![region].into());
delegate.sink_admin(request, response);
let mut err = receive_error();
assert!(err.has_epoch_not_match());
err.take_epoch_not_match()
.current_regions
.into_iter()
.find(|r| r.get_id() == 1)
.unwrap();
// Merge
let mut request = AdminRequest::default();
request.set_cmd_type(AdminCmdType::PrepareMerge);
let response = AdminResponse::default();
delegate.sink_admin(request, response);
let mut err = receive_error();
assert!(err.has_epoch_not_match());
assert!(err.take_epoch_not_match().current_regions.is_empty());
let mut request = AdminRequest::default();
request.set_cmd_type(AdminCmdType::CommitMerge);
let response = AdminResponse::default();
delegate.sink_admin(request, response);
let mut err = receive_error();
assert!(err.has_epoch_not_match());
assert!(err.take_epoch_not_match().current_regions.is_empty());
let mut request = AdminRequest::default();
request.set_cmd_type(AdminCmdType::RollbackMerge);
let response = AdminResponse::default();
delegate.sink_admin(request, response);
let mut err = receive_error();
assert!(err.has_epoch_not_match());
assert!(err.take_epoch_not_match().current_regions.is_empty());
}
#[test]
fn test_scan() {
let region_id = 1;
let mut region = Region::default();
region.set_id(region_id);
region.mut_peers().push(Default::default());
region.mut_region_epoch().set_version(2);
region.mut_region_epoch().set_conf_ver(2);
let region_epoch = region.get_region_epoch().clone();
let (sink, events) = unbounded();
let mut delegate = Delegate::new(region_id);
let downstream = Downstream::new(String::new(), region_epoch, sink);
let downstream_id = downstream.id;
delegate.subscribe(downstream);
let enabled = delegate.enabled();
assert!(enabled.load(Ordering::SeqCst));
let events_wrap = Cell::new(Some(events));
let check_event = |event_rows: Vec<EventRow>| {
let (change_data, events) = events_wrap
.replace(None)
.unwrap()
.into_future()
.wait()
.unwrap();
events_wrap.set(Some(events));
let mut change_data = change_data.unwrap();
assert_eq!(change_data.events.len(), 1);
let change_data_event = &mut change_data.events[0];
assert_eq!(change_data_event.region_id, region_id);
assert_eq!(change_data_event.index, 0);
let event = change_data_event.event.take().unwrap();
match event {
Event_oneof_event::Entries(entries) => {
assert_eq!(entries.entries.as_slice(), event_rows.as_slice());
}
_ => panic!("unknown event"),
}
};
// Stashed in pending before region ready.
let entries = vec![
Some(
EntryBuilder {
key: b"a".to_vec(),
value: b"b".to_vec(),
start_ts: 1.into(),
commit_ts: 0.into(),
primary: vec![],
for_update_ts: 0.into(),
}
.build_prewrite(LockType::Put, false),
),
Some(
EntryBuilder {
key: b"a".to_vec(),
value: b"b".to_vec(),
start_ts: 1.into(),
commit_ts: 2.into(),
primary: vec![],
for_update_ts: 0.into(),
}
.build_commit(WriteType::Put, false),
),
Some(
EntryBuilder {
key: b"a".to_vec(),
value: b"b".to_vec(),
start_ts: 3.into(),
commit_ts: 0.into(),
primary: vec![],
for_update_ts: 0.into(),
}
.build_rollback(),
),
None,
];
delegate.on_scan(downstream_id, entries);
assert_eq!(delegate.pending.as_ref().unwrap().scan.len(), 1);
let mut resolver = Resolver::new();
resolver.init();
delegate.on_region_ready(resolver, region);
// Flush all pending entries.
let mut row1 = EventRow::default();
row1.start_ts = 1;
row1.commit_ts = 0;
row1.key = b"a".to_vec();
row1.op_type = EventRowOpType::Put.into();
set_event_row_type(&mut row1, EventLogType::Prewrite);
row1.value = b"b".to_vec();
let mut row2 = EventRow::default();
row2.start_ts = 1;
row2.commit_ts = 2;
row2.key = b"a".to_vec();
row2.op_type = EventRowOpType::Put.into();
set_event_row_type(&mut row2, EventLogType::Committed);
row2.value = b"b".to_vec();
let mut row3 = EventRow::default();
set_event_row_type(&mut row3, EventLogType::Initialized);
check_event(vec![row1, row2, row3]);
}
}
| 36.523377 | 127 | 0.549657 |
f55935b73c0339fab948cbe32ae92c0030c694fa | 3,434 | use crate::bootupd;
use crate::ipc::ClientToDaemonConnection;
use crate::model::Status;
use anyhow::Result;
use log::LevelFilter;
use structopt::clap::AppSettings;
use structopt::StructOpt;
/// `bootupctl` sub-commands.
#[derive(Debug, StructOpt)]
#[structopt(name = "bootupctl", about = "Bootupd client application")]
pub struct CtlCommand {
/// Verbosity level (higher is more verbose).
#[structopt(short = "v", parse(from_occurrences), global = true)]
verbosity: u8,
/// CLI sub-command.
#[structopt(subcommand)]
pub cmd: CtlVerb,
}
impl CtlCommand {
/// Return the log-level set via command-line flags.
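    ///
    /// For example, `bootupctl -vv status` maps to `LevelFilter::Debug`.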
pub(crate) fn loglevel(&self) -> LevelFilter {
match self.verbosity {
0 => LevelFilter::Warn,
1 => LevelFilter::Info,
2 => LevelFilter::Debug,
_ => LevelFilter::Trace,
}
}
}
/// CLI sub-commands.
#[derive(Debug, StructOpt)]
pub enum CtlVerb {
// FIXME(lucab): drop this after refreshing
// https://github.com/coreos/fedora-coreos-config/pull/595
#[structopt(name = "backend", setting = AppSettings::Hidden)]
Backend(CtlBackend),
#[structopt(name = "status", about = "Show components status")]
Status(StatusOpts),
#[structopt(name = "update", about = "Update all components")]
Update,
#[structopt(name = "validate", about = "Validate system state")]
Validate,
}
#[derive(Debug, StructOpt)]
pub enum CtlBackend {
#[structopt(name = "generate-update-metadata", setting = AppSettings::Hidden)]
Generate(super::bootupd::GenerateOpts),
#[structopt(name = "install", setting = AppSettings::Hidden)]
Install(super::bootupd::InstallOpts),
}
#[derive(Debug, StructOpt)]
pub struct StatusOpts {
// Output JSON
#[structopt(long)]
json: bool,
}
impl CtlCommand {
/// Run CLI application.
pub fn run(self) -> Result<()> {
match self.cmd {
CtlVerb::Status(opts) => Self::run_status(opts),
CtlVerb::Update => Self::run_update(),
CtlVerb::Validate => Self::run_validate(),
CtlVerb::Backend(CtlBackend::Generate(opts)) => {
super::bootupd::DCommand::run_generate_meta(opts)
}
CtlVerb::Backend(CtlBackend::Install(opts)) => {
super::bootupd::DCommand::run_install(opts)
}
}
}
/// Runner for `status` verb.
fn run_status(opts: StatusOpts) -> Result<()> {
let mut client = ClientToDaemonConnection::new();
client.connect()?;
let r: Status = client.send(&bootupd::ClientRequest::Status)?;
if opts.json {
let stdout = std::io::stdout();
let mut stdout = stdout.lock();
serde_json::to_writer_pretty(&mut stdout, &r)?;
} else {
bootupd::print_status(&r);
}
client.shutdown()?;
Ok(())
}
/// Runner for `update` verb.
fn run_update() -> Result<()> {
let mut client = ClientToDaemonConnection::new();
client.connect()?;
bootupd::client_run_update(&mut client)?;
client.shutdown()?;
Ok(())
}
/// Runner for `validate` verb.
fn run_validate() -> Result<()> {
let mut client = ClientToDaemonConnection::new();
client.connect()?;
bootupd::client_run_validate(&mut client)?;
client.shutdown()?;
Ok(())
}
}
| 29.101695 | 82 | 0.595515 |
c12058131a340a7da0d7b453849ca8aca0ca8ea2 | 9,765 | use super::{
BoundedArray,
OutOfBoundsAccess,
};
use crate::Index;
use core::marker::PhantomData;
/// A quad that represents one of 4 different states.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(u8)]
#[allow(non_camel_case_types)]
pub enum quad {
/// Both bits are `0`.
B00 = 0b00,
/// Least-significant bit is `1`, other is `0`.
B01 = 0b01,
/// Most-significant bit is `1`, other is `0`.
B10 = 0b10,
/// Both bits are `1`.
B11 = 0b11,
}
impl Default for quad {
fn default() -> Self {
Self::B00
}
}
/// Types that can convert to and from a [`quad`].
pub trait Quad {
    /// Converts a quad into `Self`.
fn from_quad(value: quad) -> Self;
    /// Converts `self` into a [`quad`].
fn into_quad(self) -> quad;
}
impl Quad for quad {
#[inline]
fn from_quad(value: quad) -> Self {
value
}
#[inline]
fn into_quad(self) -> quad {
self
}
}
impl From<u8> for quad {
#[inline]
fn from(byte: u8) -> Self {
assert!(byte <= 0b11);
match byte {
0b00 => Self::B00,
0b01 => Self::B01,
0b10 => Self::B10,
0b11 => Self::B11,
_ => panic!("byte out of bounds for quad"),
}
}
}
impl From<quad> for u8 {
#[inline]
fn from(quad: quad) -> Self {
quad as u8
}
}
/// The raw type of a chunk in the [`BoundedQuadmap`].
///
/// Chunks are the raw entities that store the quads stored in the bounded quad map.
type Chunk = u32;
/// The number of bits used per quad stored in the [`BoundedQuadmap`].
const BITS_PER_QUAD: usize = 2;
/// The number of bits in a single chunk of the [`BoundedQuadmap`].
const CHUNK_LEN: usize = core::mem::size_of::<Chunk>() * 8;
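// Example: with 2 bits per quad and 32-bit chunks, each chunk packs 16 quads,
// so quad index 17 lives in chunk 1 (17 / 16) at in-chunk slot 1 (17 % 16).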
/// An internal chunk index within the bounded quad map.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(transparent)]
struct ChunkIndex {
value: usize,
}
impl Index for ChunkIndex {
#[inline]
fn from_index(index: usize) -> Self {
ChunkIndex {
value: index / (CHUNK_LEN / BITS_PER_QUAD),
}
}
#[inline]
fn into_index(self) -> usize {
self.value
}
}
/// An internal quad index within a chunk of the bounded quad map.
#[derive(Debug, Copy, Clone)]
#[repr(transparent)]
struct QuadIndex {
value: usize,
}
impl Index for QuadIndex {
#[inline]
fn from_index(index: usize) -> Self {
Self {
value: index % (CHUNK_LEN / BITS_PER_QUAD),
}
}
#[inline]
fn into_index(self) -> usize {
self.value
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct BoundedQuadmap<Idx, T> {
len: usize,
chunks: BoundedArray<ChunkIndex, Chunk>,
marker: PhantomData<fn() -> (Idx, T)>,
}
impl<Idx, T> Default for BoundedQuadmap<Idx, T> {
fn default() -> Self {
Self {
len: 0,
chunks: BoundedArray::default(),
marker: Default::default(),
}
}
}
impl<Idx, T> BoundedQuadmap<Idx, T>
where
Idx: Index,
T: Default,
{
    /// Returns the number of chunks required to store the given number of quads.
fn required_chunks(required_quads: usize) -> usize {
required_quads.saturating_sub(1) * BITS_PER_QUAD / CHUNK_LEN + 1
}
/// Creates a new bounded quad map with the given length.
///
/// All elements are initialized with their default values.
pub fn with_len(len: usize) -> Self {
let len_chunks = Self::required_chunks(len);
Self {
len,
chunks: BoundedArray::with_len(len_chunks, |_| Default::default()),
marker: Default::default(),
}
}
/// Resizes the bounded quad map to the new length.
///
/// Shrinks the size if the new length is lower than the current length.
/// If the length is increased all new elements are initialized with their
/// default values.
pub fn resize_to_len(&mut self, new_len: usize) {
let len_chunks = Self::required_chunks(new_len);
self.chunks.resize_with(len_chunks, Default::default);
self.len = new_len;
}
}
impl<Idx, T> BoundedQuadmap<Idx, T>
where
Idx: Index,
{
/// Returns the number of quads that are stored in the bounded quad map.
#[inline]
pub fn len(&self) -> usize {
self.len
}
/// Returns `true` if the bounded quad map is empty.
#[inline]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the bit mask for the quad at the given index.
///
/// # Note
///
    /// The bit mask shadows all but the bits needed to extract the quad
    /// information that the given index refers to.
fn quad_index_to_mask(index: QuadIndex) -> Chunk {
0b11_u32 << (CHUNK_LEN - (BITS_PER_QUAD * (1 + index.into_index())))
}
/// Ensures that the given index is valid for the bounded quad map.
///
/// # Errors
///
/// If the given index is out of bounds.
fn ensure_valid_index(&self, index: Idx) -> Result<usize, OutOfBoundsAccess> {
let index = index.into_index();
if index >= self.len() {
return Err(OutOfBoundsAccess)
}
Ok(index)
}
}
impl<Idx, T> BoundedQuadmap<Idx, T>
where
Idx: Index,
T: Quad,
{
/// Returns the bit mask for the quad at the given index using another quad.
///
/// # Note
///
    /// The bit mask shadows all but the bits needed to extract the quad
    /// information that the given index refers to.
    /// The given quad's bit representation is used as the bit pattern within the mask.
fn quad_index_to_mask_using(index: QuadIndex, flag: T) -> Chunk {
(u8::from(flag.into_quad()) as Chunk)
<< (CHUNK_LEN - (BITS_PER_QUAD * (1 + index.into_index())))
}
/// Splits the given index into chunk and quad indices.
fn split_index(idx: Idx) -> (ChunkIndex, QuadIndex) {
let raw_index = idx.into_index();
(
ChunkIndex::from_index(raw_index),
QuadIndex::from_index(raw_index),
)
}
/// Returns the quad at the given index.
///
/// # Errors
///
/// If the given index is out of bounds for the bounded array.
#[inline]
pub fn get(&self, index: Idx) -> Result<T, OutOfBoundsAccess> {
self.ensure_valid_index(index)?;
let (chunk_idx, quad_idx) = Self::split_index(index);
let chunk = self
.chunks
.get(chunk_idx)
.expect("unexpected out of bounds chunk");
let mask = Self::quad_index_to_mask(quad_idx);
let shift_len = CHUNK_LEN - (BITS_PER_QUAD * (1 + quad_idx.into_index()));
let value = (chunk & mask) >> shift_len;
debug_assert!(value <= 0b11);
Ok(T::from_quad(quad::from(value as u8)))
}
/// Sets the value of the quad at the given index.
///
/// # Errors
///
/// If the given index is out of bounds for the bounded array.
#[inline]
pub fn set(&mut self, index: Idx, new_value: T) -> Result<(), OutOfBoundsAccess> {
self.ensure_valid_index(index)?;
let (chunk_idx, quad_idx) = Self::split_index(index);
let chunk = self
.chunks
.get_mut(chunk_idx)
.expect("unexpected out of bounds chunk");
// Empty bits before eventually writing the new bit pattern.
// If there are bit access patterns that can combine these two steps we should do them instead.
*chunk &= !Self::quad_index_to_mask(quad_idx);
*chunk |= Self::quad_index_to_mask_using(quad_idx, new_value);
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn default_works() {
let map = <BoundedQuadmap<usize, quad>>::default();
assert_eq!(map.len(), 0);
assert!(map.is_empty());
}
#[test]
fn with_len_works() {
let map = <BoundedQuadmap<usize, quad>>::with_len(10);
assert_eq!(map.len(), 10);
assert!(!map.is_empty());
for i in 0..10 {
assert_eq!(map.get(i), Ok(quad::B00));
}
}
#[test]
fn set_works() {
let mut map = <BoundedQuadmap<usize, quad>>::default();
map.resize_to_len(10);
assert_eq!(map.get(0), Ok(quad::B00));
map.set(0, quad::B01).unwrap();
assert_eq!(map.get(0), Ok(quad::B01));
map.set(0, quad::B10).unwrap();
assert_eq!(map.get(0), Ok(quad::B10));
map.set(0, quad::B11).unwrap();
assert_eq!(map.get(0), Ok(quad::B11));
}
#[test]
fn get_out_of_bounds_fails() {
let map = <BoundedQuadmap<usize, quad>>::with_len(3);
assert!(map.get(0).is_ok());
assert!(map.get(1).is_ok());
assert!(map.get(2).is_ok());
assert_eq!(map.get(3), Err(OutOfBoundsAccess));
}
#[test]
fn set_out_of_bounds_fails() {
let mut map = <BoundedQuadmap<usize, quad>>::with_len(3);
assert!(map.set(0, quad::B01).is_ok());
assert!(map.set(1, quad::B10).is_ok());
assert!(map.set(2, quad::B11).is_ok());
assert_eq!(map.set(3, quad::B11), Err(OutOfBoundsAccess));
}
#[test]
fn set_all_multiword_works() {
let len = 100;
let mut map = <BoundedQuadmap<usize, quad>>::with_len(len);
for i in 0..len {
assert_eq!(map.get(i), Ok(quad::B00));
let set_to = match i % 4 {
0 => quad::B00,
1 => quad::B01,
2 => quad::B10,
3 => quad::B11,
_ => unreachable!(),
};
map.set(i, set_to).unwrap();
assert_eq!(map.get(i), Ok(set_to));
}
}
}
| 27.820513 | 103 | 0.571121 |
692f6328b073765c29c0235cb227fa5e1d6653cc | 6,861 | use anyhow::{Context, Result};
use btleplug::api::{Central, Peripheral, UUID};
use failure::Fail;
use log::{debug, error, trace, warn};
use gilrs::ev::{Axis, Button, EventType};
use crate::ble;
use crate::errors::CarbleuratorError;
use crate::gamepad;
use crate::signaling::{update_signal_failure, update_signal_progress, update_signal_success};
const BLE_PERIPH_NAME: &str = "HC-08";
//const BLE_SVC_UUID: &str = "0000FFE0-0000-1000-8000-00805F9B34FB";
//const BLE_SVC_UUID_SHORT: u16 = 0xFFE0;
//const BLE_CHR_UUID: &str = "0000FFE1-0000-1000-8000-00805F9B34FB";
const BLE_CHR_UUID_SHORT: u16 = 0xFFE1;
pub(crate) struct Carbleurator {
gilrs: gilrs::Gilrs,
adapter: ble::Adapter,
d_x: i8,
d_y: i8,
}
impl Carbleurator {
pub(crate) fn new() -> Result<Self> {
let result = Self::init();
match &result {
Ok(_) => update_signal_progress(),
Err(e) => {
error!("Carbleurator initialization failure: {}", e);
update_signal_failure();
}
}
result
}
fn init() -> Result<Self> {
update_signal_progress();
trace!("Initializing gamepads...");
let gilrs = gamepad::init_gamepads()?;
for (_id, gamepad) in gilrs.gamepads() {
debug!("{} is {:?}", gamepad.name(), gamepad.power_info());
}
update_signal_progress();
trace!("Initializing bluetooth...");
let manager = ble::Manager::new().map_err(|e| e.compat())?;
update_signal_progress();
trace!("Initializing BLE central...");
let adapter = ble::get_central(&manager)?;
trace!("Carbleurator initialized.");
Ok(Carbleurator {
gilrs,
adapter,
d_x: 0,
d_y: 0,
})
}
pub(crate) fn event_loop(&mut self) {
loop {
trace!("Starting event processing...");
if let Err(e) = self.run_events() {
error!("Event processing failed with error {}", e);
update_signal_failure();
}
std::thread::sleep(std::time::Duration::from_secs(3));
update_signal_progress();
trace!("Retrying event processing...");
}
}
fn run_events(&mut self) -> Result<()> {
update_signal_progress();
trace!("Starting scan for BLE peripherals...");
self.adapter
.start_scan()
.map_err(|e| e.compat())
.with_context(|| "Failed to scan for new BLE peripherals".to_string())?;
update_signal_progress();
trace!("Waiting for devices to appear...");
std::thread::sleep(std::time::Duration::from_secs(1));
update_signal_progress();
let mut counter = 0;
let mut opt_peripheral = None;
trace!("Iterating over discovered devices searching for a compatible peripheral...");
while counter <= 5 && opt_peripheral.is_none() {
opt_peripheral = self
.adapter
.peripherals()
.into_iter()
.find(|x| x.properties().local_name == Some(BLE_PERIPH_NAME.to_string()));
if opt_peripheral.is_none() {
warn!("No compatible BLE peripherals found. Retrying...");
counter += 1;
std::thread::sleep(std::time::Duration::from_secs(1));
}
update_signal_progress();
}
let peripheral = opt_peripheral.ok_or(CarbleuratorError::BleAdapterDiscoveryTimeout)?;
trace!("BLE peripheral found. Connecting...");
peripheral.connect().map_err(|e| e.compat())?;
update_signal_progress();
trace!("Searching for correct peripheral characteristic for communication...");
let res_characteristics = peripheral
.discover_characteristics()
.map_err(|e| e.compat())?;
let characteristic = res_characteristics
.into_iter()
.find(|x| x.uuid == UUID::B16(BLE_CHR_UUID_SHORT))
.ok_or(CarbleuratorError::BleAdapterMissingCharacteristic)?;
trace!("Gamepad input configured, connected to compatible car, starting control loop...");
update_signal_success();
loop {
while let Some(gilrs::Event { event, .. }) = self.gilrs.next_event() {
trace!("Processing input event {:?}", event);
match event {
EventType::ButtonPressed(Button::DPadLeft, _) => self.d_x = -128,
EventType::ButtonReleased(Button::DPadLeft, _) => self.d_x = 0,
EventType::ButtonPressed(Button::DPadRight, _) => self.d_x = 127,
EventType::ButtonReleased(Button::DPadRight, _) => self.d_x = 0,
EventType::ButtonPressed(Button::DPadUp, _) => self.d_y = -128,
EventType::ButtonReleased(Button::DPadUp, _) => self.d_y = 0,
EventType::ButtonPressed(Button::DPadDown, _) => self.d_y = 127,
EventType::ButtonReleased(Button::DPadDown, _) => self.d_y = 0,
EventType::AxisChanged(Axis::DPadX, d_x, _) => self.d_x = (d_x * 128f32) as i8,
EventType::AxisChanged(Axis::DPadY, d_y, _) => self.d_y = (d_y * 128f32) as i8,
EventType::AxisChanged(Axis::LeftStickX, d_x, _) => {
self.d_x = (d_x * 128f32) as i8
}
EventType::AxisChanged(Axis::LeftStickY, d_y, _) => {
self.d_y = (d_y * 128f32) as i8
}
EventType::AxisChanged(Axis::RightStickX, d_x, _) => {
self.d_x = (d_x * 128f32) as i8
}
EventType::AxisChanged(Axis::RightStickY, d_y, _) => {
self.d_y = (d_y * 128f32) as i8
}
_ => {}
}
}
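            // Collapse the current stick deflection into one of the
            // single-byte drive commands sent to the car; the letter
            // meanings (s=stop, f=forward, b=back, l=left, r=right) are an
            // assumption inferred from the axis signs.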
let msg: &[u8; 1] = match (self.d_x, self.d_y) {
(-63..=63, -63..=63) => b"s",
(_, 64..=127) => b"b",
(_, -128..=-64) => b"f",
(-128..=-64, -63..=63) => b"l",
(64..=127, -63..=63) => b"r",
};
// TODO: only send message if the msg value changed or after x amount of seconds have
// passed
trace!("Preparing to send message to vehicle: {:?}", msg);
peripheral
.command(&characteristic, msg)
.map_err(|e| e.compat())?;
// TODO: crank up sleep time each period we go without getting input, up to a
// predetermined limit, but reset the sleep time once we do get input
std::thread::sleep(std::time::Duration::from_millis(100));
}
}
}
| 38.982955 | 99 | 0.534616 |
23bc2fc609e85bbcc252ac2f82afb3599a5b14d9 | 11,552 | use std::collections::BTreeSet;
use edgeql_parser::tokenizer::{TokenStream, Kind};
use edgeql_parser::position::Pos;
use edgeql_parser::helpers::unquote_string;
use num_bigint::{BigInt, ToBigInt};
use bigdecimal::BigDecimal;
use crate::tokenizer::{CowToken};
#[derive(Debug, PartialEq)]
pub enum Value {
Str(String),
Int(i64),
Float(f64),
BigInt(BigInt),
Decimal(BigDecimal),
}
#[derive(Debug, PartialEq)]
pub struct Variable {
pub value: Value,
}
#[derive(Debug)]
pub struct Entry<'a> {
pub key: String,
pub tokens: Vec<CowToken<'a>>,
pub variables: Vec<Variable>,
pub end_pos: Pos,
pub named_args: bool,
pub first_arg: Option<usize>,
}
#[derive(Debug)]
pub enum Error {
Tokenizer(String, Pos),
Assertion(String, Pos),
}
fn push_var<'x>(res: &mut Vec<CowToken<'x>>, typ: &'x str, var: String,
start: Pos, end: Pos)
{
res.push(CowToken {kind: Kind::OpenParen, value: "(".into(), start, end});
res.push(CowToken {kind: Kind::Less, value: "<".into(), start, end});
res.push(CowToken {kind: Kind::Ident, value: typ.into(), start, end});
res.push(CowToken {kind: Kind::Greater, value: ">".into(), start, end});
res.push(CowToken {kind: Kind::Argument, value: var.into(), start, end});
res.push(CowToken {kind: Kind::CloseParen, value: ")".into(), start, end});
}
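/// Scans the token stream for query arguments ($0-style positional or
/// $name-style named).
///
/// Returns `Some((named, n))`, where `named` tells whether the arguments are
/// named and `n` is the next free index for positional arguments or the number
/// of distinct named arguments. Returns `None` if positional and named
/// arguments are mixed.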
fn scan_vars<'x, 'y: 'x, I>(tokens: I) -> Option<(bool, usize)>
where I: IntoIterator<Item=&'x CowToken<'y>>,
{
let mut max_visited = None::<usize>;
let mut names = BTreeSet::new();
for t in tokens {
if t.kind == Kind::Argument {
if let Ok(v) = t.value[1..].parse() {
if max_visited.map(|old| v > old).unwrap_or(true) {
max_visited = Some(v);
}
} else {
names.insert(&t.value[..]);
}
}
}
if names.is_empty() {
let next = max_visited.map(|x| x.checked_add(1)).unwrap_or(Some(0))?;
Some((false, next))
} else if max_visited.is_some() {
return None // mixed arguments
} else {
Some((true, names.len()))
}
}
pub fn normalize<'x>(text: &'x str)
-> Result<Entry<'x>, Error>
{
use combine::easy::Error::*;
let mut token_stream = TokenStream::new(&text);
let mut tokens = Vec::new();
for res in &mut token_stream {
match res {
Ok(t) => tokens.push(CowToken::from(t)),
Err(Unexpected(s)) => {
return Err(Error::Tokenizer(
s.to_string(), token_stream.current_pos()));
}
Err(e) => {
return Err(Error::Tokenizer(
e.to_string(), token_stream.current_pos()));
}
}
}
let end_pos = token_stream.current_pos();
let (named_args, var_idx) = match scan_vars(&tokens) {
Some(pair) => pair,
None => {
// don't extract from invalid query, let python code do its work
return Ok(Entry {
key: serialize_tokens(&tokens),
tokens,
variables: Vec::new(),
end_pos,
named_args: false,
first_arg: None,
});
}
};
let mut rewritten_tokens = Vec::with_capacity(tokens.len());
let mut variables = Vec::new();
let next_var = |num: usize| {
if named_args {
format!("$__edb_arg_{}", var_idx + num)
} else {
format!("${}", var_idx + num)
}
};
for tok in &tokens {
match tok.kind {
Kind::IntConst
// Don't replace `.12` because this is a tuple access
if !matches!(rewritten_tokens.last(),
Some(CowToken { kind: Kind::Dot, .. }))
// Don't replace 'LIMIT 1' as a special case
&& (tok.value != "1"
|| !matches!(rewritten_tokens.last(),
Some(CowToken { kind: Kind::Keyword, ref value, .. })
if value.eq_ignore_ascii_case("LIMIT")))
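                // Don't replace 9223372036854775808 (|i64::MIN|): it only
                // parses as an i64 when preceded by unary minus, so leave it inline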
&& tok.value != "9223372036854775808"
=> {
push_var(&mut rewritten_tokens, "__std__::int64",
next_var(variables.len()),
tok.start, tok.end);
variables.push(Variable {
value: Value::Int(tok.value.replace("_", "").parse()
.map_err(|e| Error::Tokenizer(
format!("can't parse integer: {}", e),
tok.start))?),
});
continue;
}
Kind::FloatConst => {
push_var(&mut rewritten_tokens, "__std__::float64",
next_var(variables.len()),
tok.start, tok.end);
let value = tok.value.replace("_", "").parse()
.map_err(|e| Error::Tokenizer(
format!("can't parse std::float64: {}", e),
tok.start))?;
                if value.is_infinite() {
                    return Err(Error::Tokenizer(
                        "number is out of range for std::float64".to_string(),
tok.start));
}
variables.push(Variable {
value: Value::Float(value),
});
continue;
}
Kind::BigIntConst => {
push_var(&mut rewritten_tokens, "__std__::bigint",
next_var(variables.len()),
tok.start, tok.end);
let dec: BigDecimal = tok.value[..tok.value.len()-1]
.replace("_", "").parse()
.map_err(|e| Error::Tokenizer(
format!("can't parse bigint: {}", e),
tok.start))?;
variables.push(Variable {
value: Value::BigInt(dec.to_bigint()
.ok_or_else(|| Error::Assertion(
format!("number is not integer"),
tok.start))?),
});
continue;
}
Kind::DecimalConst => {
push_var(&mut rewritten_tokens, "__std__::decimal",
next_var(variables.len()),
tok.start, tok.end);
variables.push(Variable {
value: Value::Decimal(
tok.value[..tok.value.len()-1]
.replace("_", "")
.parse()
.map_err(|e| Error::Tokenizer(
format!("can't parse decimal: {}", e),
tok.start))?),
});
continue;
}
Kind::Str => {
push_var(&mut rewritten_tokens, "__std__::str",
next_var(variables.len()),
tok.start, tok.end);
variables.push(Variable {
value: Value::Str(unquote_string(&tok.value)
.map_err(|e| Error::Tokenizer(
format!("can't unquote string: {}", e),
tok.start))?.into()),
});
continue;
}
Kind::Keyword
                if matches!(tok.value.to_uppercase().as_str(),
                    "CONFIGURE" | "CREATE" | "ALTER" | "DROP" | "START")
=> {
return Ok(Entry {
key: serialize_tokens(&tokens),
tokens,
variables: Vec::new(),
end_pos,
named_args: false,
first_arg: None,
});
}
_ => rewritten_tokens.push(tok.clone()),
}
}
return Ok(Entry {
named_args,
first_arg: if variables.is_empty() { None } else { Some(var_idx) },
key: serialize_tokens(&rewritten_tokens[..]),
tokens: rewritten_tokens,
variables,
end_pos,
});
}
fn is_operator(token: &CowToken) -> bool {
use edgeql_parser::tokenizer::Kind::*;
match token.kind {
| Assign
| SubAssign
| AddAssign
| Arrow
| Coalesce
| Namespace
| ForwardLink
| BackwardLink
| FloorDiv
| Concat
| GreaterEq
| LessEq
| NotEq
| NotDistinctFrom
| DistinctFrom
| Comma
| OpenParen
| CloseParen
| OpenBracket
| CloseBracket
| OpenBrace
| CloseBrace
| Dot
| Semicolon
| Colon
| Add
| Sub
| Mul
| Div
| Modulo
| Pow
| Less
| Greater
| Eq
| Ampersand
| Pipe
| At
=> true,
| DecimalConst
| FloatConst
| IntConst
| BigIntConst
| BinStr
| Argument
| Str
| BacktickName
| Keyword
| Ident
=> false,
}
}
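/// Joins token values into a canonical key string, inserting a single space only
/// between consecutive non-operator tokens (and never before an argument token).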
fn serialize_tokens(tokens: &[CowToken<'_>]) -> String {
use edgeql_parser::tokenizer::Kind::Argument;
let mut buf = String::new();
let mut needs_space = false;
for token in tokens {
if needs_space && !is_operator(token) && token.kind != Argument {
buf.push(' ');
}
buf.push_str(&token.value);
needs_space = !is_operator(token);
}
return buf;
}
#[cfg(test)]
mod test {
use super::scan_vars;
use combine::{StreamOnce, Positioned, easy::Error};
use edgeql_parser::tokenizer::{TokenStream};
use edgeql_parser::position::Pos;
use crate::tokenizer::{CowToken};
fn tokenize<'x>(s: &'x str) -> Vec<CowToken<'x>> {
let mut r = Vec::new();
let mut s = TokenStream::new(s);
loop {
match s.uncons() {
Ok(x) => r.push(CowToken {
kind: x.kind,
value: x.value.into(),
start: Pos { line: 0, column: 0, offset: 0 },
end: Pos { line: 0, column: 0, offset: 0 },
}),
Err(ref e) if e == &Error::end_of_input() => break,
Err(e) => panic!("Parse error at {}: {}", s.position(), e),
}
}
return r;
}
#[test]
fn none() {
assert_eq!(scan_vars(&tokenize("SELECT 1+1")).unwrap(), (false, 0));
}
#[test]
fn numeric() {
assert_eq!(scan_vars(&tokenize("$0 $1 $2")).unwrap(), (false, 3));
assert_eq!(scan_vars(&tokenize("$2 $3 $2")).unwrap(), (false, 4));
assert_eq!(scan_vars(&tokenize("$0 $0 $0")).unwrap(), (false, 1));
assert_eq!(scan_vars(&tokenize("$10 $100")).unwrap(), (false, 101));
}
#[test]
fn named() {
assert_eq!(scan_vars(&tokenize("$a")).unwrap(), (true, 1));
assert_eq!(scan_vars(&tokenize("$b $c $d")).unwrap(), (true, 3));
assert_eq!(scan_vars(&tokenize("$b $c $b")).unwrap(), (true, 2));
assert_eq!(scan_vars(&tokenize("$a $b $b $a $c $xx")).unwrap(),
(true, 4));
}
#[test]
fn mixed() {
assert_eq!(scan_vars(&tokenize("$a $0")), None);
assert_eq!(scan_vars(&tokenize("$0 $a")), None);
assert_eq!(scan_vars(&tokenize("$b $c $100")), None);
assert_eq!(scan_vars(&tokenize("$10 $xx $yy")), None);
}
}
| 32.088889 | 79 | 0.470048 |
e836459aed999fdbe7e3934aea2e05c413fb5955 | 13,303 | #[macro_use]
extern crate log;
use std::io::{BufRead, Seek, Write};
use std::marker::PhantomData;
use flate2::bufread::GzDecoder;
use flate2::write::GzEncoder;
use thiserror::Error;
use response_log::ResponseLog;
use warcio::compression::Compression;
use warcio::record::{Buffer, FinishError, InvalidRecord, Record};
use warcio::{FieldKind, RecordKind};
use crate::digest::Digester;
pub mod digest;
pub mod response_log;
// TODO: builder pattern seems worthwhile
pub struct Deduplicator<D, L, W> {
// The digester implementation must be stable over the life of
// a deduplicator, but we don't hold an instance.
digester: PhantomData<D>,
log: L,
decompress_buffer: Option<Buffer>,
output_compression: Compression,
output: W,
}
impl<D, L, W> Deduplicator<D, L, W>
where
D: Digester,
<D as Digester>::Digest: Eq + Clone,
L: ResponseLog<D::Digest>,
W: Write,
{
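    /// Creates a deduplicator that writes (de)duplicated records to `output`.
    ///
    /// A minimal sketch — `Sha1Digester` and `InMemoryLog` are hypothetical
    /// placeholders for concrete [`digest::Digester`] and
    /// [`response_log::ResponseLog`] implementations:
    ///
    /// ```ignore
    /// let output = std::fs::File::create("deduped.warc.gz")?;
    /// let mut dedup: Deduplicator<Sha1Digester, InMemoryLog, _> =
    ///     Deduplicator::new(output, InMemoryLog::default(), Compression::Gzip);
    /// ```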
pub fn new(output: W, log: L, output_compression: Compression) -> Self {
Self {
digester: Default::default(),
log,
decompress_buffer: Some(Buffer::with_capacity(2 << 20)),
output_compression,
// TODO: io::copy can reuse a write buffer (from a BufWriter), may be worthwhile
output,
}
}
fn process_record<R: BufRead>(
&mut self,
record: &mut Record<R>,
) -> Result<ProcessOutcome, ProcessError> {
use ProcessOutcome::NeedsCopy;
// A record ID is required by the spec to be present, and we need to refer
// to an original record by ID. Noncompliant records will simply be copied.
let record_id = match record.header.get_field(FieldKind::RecordId) {
Some(id) => id,
None => {
warn!("Skipping record with no ID: impossible to WARC-Refers-To");
return Ok(NeedsCopy);
}
};
// TODO: update the WARC-Filename of warcinfo records?
// Only consider responses to be candidates for deduplication.
// The spec permits use of revisit records for any kind of record (noting it's
// typical for response and resource records), but it's likely to be most
// useful only for responses.
// TODO: truncated records might have the same payload after truncation as another truncated
// record but the truncated part might differ. Possibly ignore truncated records.
if record.header.get_field(FieldKind::Type).map_or(false, |t| RecordKind::Response != t) {
trace!(
"Skip non-response record {:?} of type {:?}",
record_id,
record.header.warc_type()
);
return Ok(NeedsCopy);
}
// For HTTP responses, we'll only digest the response body and ignore headers as
// specified by WARC 1.1 6.3.2 and RFC 2616.
let content_type: Option<mime::Mime> = record
.header
.get_field(FieldKind::ContentType)
.and_then(|s| s.parse().ok());
let content_is_http_response = content_type.map_or(false, |t| {
t.essence_str() == "application/http"
&& t.get_param("msgtype").map_or(true, |mt| mt == "response")
});
let uri_is_http = record
.header
.get_field(FieldKind::TargetURI)
.map_or(false, |uri| {
uri.starts_with("http:") || uri.starts_with("https:")
});
// The data representing HTTP headers which gets included in the revisit record.
// If non-empty, a WARC-Truncated record header with reason "length" will be output for
// deduplicated records with the contained data included in the record.
let mut prefix_data: Vec<u8> = Vec::new();
if content_is_http_response || uri_is_http {
let mut consumed_bytes = 0usize;
loop {
// Grab some data to try to parse. We could avoid copying some data in the case
// that the input's buffer entirely contains the response header, but copying the
// entire buffer and truncating makes it much easier to handle the case where the
// response header is larger than the input buffer.
prefix_data.extend_from_slice(record.fill_buf()?);
// We need a response to parse into, but don't actually care about the contents.
let mut headers_buf = [httparse::EMPTY_HEADER; 64];
let mut parsed_response = httparse::Response::new(&mut headers_buf);
match httparse::Response::parse(&mut parsed_response, &prefix_data) {
Ok(httparse::Status::Complete(n)) => {
// Advance input past HTTP headers to digest payload only
record.consume(n - consumed_bytes);
consumed_bytes += n;
// Truncate copied data to the same length as consumed data
prefix_data.truncate(consumed_bytes);
break;
}
Err(e) if consumed_bytes == 0 => {
// Can't parse HTTP headers, so treat entire record as payload
trace!(
"HTTP parse error in record, will digest entire content: {}",
e
);
break;
}
Err(e) => {
// Consumed data from the input and can't rewind, so will have to simply
// copy the record without deduplicating.
trace!("unrewindable HTTP parse error in record, will copy: {}", e);
return Ok(NeedsCopy);
}
Ok(httparse::Status::Partial) => {
// Consume the entire buffer from the input
record.consume(prefix_data.len() - consumed_bytes);
consumed_bytes = prefix_data.len();
}
}
}
}
// Digest payload and record the digest, checking for a match
// TODO: if an HTTP response uses chunked encoding, that may need to be handled here.
let digest = match D::digest_record(record)? {
None => {
debug!(
"Record {} is not eligible for deduplication; skipping",
record.header.record_id()
);
return Ok(NeedsCopy);
}
Some(d) => d,
};
debug!("Digested record to {:?}", digest);
let record_id = record.header.record_id();
// It is recommended that revisit records refer to the original target URI and date,
// but not mandatory.
let timestamp = record.header.get_field(FieldKind::Date);
let target = record.header.get_field(FieldKind::TargetURI);
let (original_id, original_uri, original_date) =
match self.log.add(record_id, target, timestamp, digest.clone()) {
None => return Ok(NeedsCopy),
Some(ids) => ids,
};
debug!(
"writing revisit record for {} referring to {} (URI {:?}, Date {:?})",
record_id, original_id, original_uri, original_date
);
// At this point we've found a record with matching digest to deduplicate against.
// We'll create a new revisit record to emit, starting by copying the fields from the
// original record then updating various fields.
// TODO clone header names but convert to WARC 1.1 (including translating URIs)
// TODO for small bodies (with small space savings) it's not worth writing a revisit
let mut dedup_headers = record.header.clone();
// We're emitting a revisit record according to WARC 1.1, based on an identical
// payload digest.
dedup_headers.set_field("WARC-Type", "revisit");
dedup_headers.set_field(
"WARC-Profile",
"http://netpreserve.org/warc/1.1/revisit/identical-payload-digest",
);
dedup_headers.set_field("WARC-Refers-To", original_id);
if let Some(original_uri) = original_uri {
dedup_headers.set_field("WARC-Refers-To-Target-URI", original_uri);
}
if let Some(original_date) = original_date {
dedup_headers.set_field("WARC-Refers-To-Date", original_date);
}
dedup_headers.set_field("WARC-Payload-Digest", D::format_digest(&digest));
dedup_headers.set_field("Content-Length", format!("{}", prefix_data.len()));
dedup_headers.set_field("WARC-Truncated", "length");
// TODO we have the entire prefix data, so can compute the new digest rather than dropping
dedup_headers.remove_field(FieldKind::BlockDigest.into_name());
// TODO: this seems to be writing extra garbage after the prefix data
let mut dedup_body = dedup_headers.write_to(&mut self.output, self.output_compression)?;
dedup_body.write_all(&prefix_data)?;
Ok(ProcessOutcome::Deduplicated)
}
/// Read and deduplicate a single record from the provided input.
///
/// Only data up to the end of the record will be consumed from the input.
///
/// Returns true on success if the record was deduplicated, or false if it was simply
/// copied.
pub fn read_record<R: BufRead + Seek>(
&mut self,
mut input: R,
compression: Compression,
) -> Result<bool, ProcessError> {
let start_offset = input.stream_position()?;
trace!(
"Deduplicator start record read from input offset {}",
start_offset
);
let mut record = Record::read_buffered_from(
input,
self.decompress_buffer
.take()
.expect("read_buffer should not be stolen"),
compression,
)?;
// TODO: if a payload digest is already present in the record we may be able to use that
// instead of computing a fresh one.
if self.process_record(&mut record)? == ProcessOutcome::NeedsCopy {
// Not a duplicate. Drop the record to regain access to the raw input so we can
// copy the raw record data with std::io::copy, taking advantage of OS-level copy
// acceleration like copy_file_range(2) or sendfile(2).
let (mut input, buffer) = record.finish()?;
self.decompress_buffer = Some(buffer);
let end_offset = input.stream_position()?;
input.seek(std::io::SeekFrom::Start(start_offset))?;
let mut raw_record = input.take(end_offset - start_offset);
trace!(
"Copy {} bytes to output, transform {:?} -> {:?}",
end_offset - start_offset,
compression,
self.output_compression
);
match (compression, self.output_compression) {
(Compression::None, Compression::None) | (Compression::Gzip, Compression::Gzip) => {
std::io::copy(&mut raw_record, &mut self.output)?
}
(Compression::None, Compression::Gzip) => std::io::copy(
&mut raw_record,
&mut GzEncoder::new(&mut self.output, flate2::Compression::best()),
)?,
(Compression::Gzip, Compression::None) => {
std::io::copy(&mut GzDecoder::new(&mut raw_record), &mut self.output)?
}
};
return Ok(false);
}
let (_, buffer) = record.finish()?;
self.decompress_buffer = Some(buffer);
Ok(true)
}
/// Read a stream of records, deduplicating and writing to the output until end of input.
///
/// Returns a pair of the number of records copied, and the number of records deduplicated.
/// The sum of these is the total number of records read.
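    ///
    /// A minimal sketch, assuming `dedup` was built with [`Deduplicator::new`] and
    /// `input` is a buffered, seekable reader over a gzip-compressed WARC file:
    ///
    /// ```ignore
    /// let (copied, deduped) = dedup.read_stream(input, Compression::Gzip)?;
    /// eprintln!("{} records copied, {} deduplicated", copied, deduped);
    /// ```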
pub fn read_stream<R: BufRead + Seek>(
&mut self,
mut input: R,
compression: Compression,
) -> Result<(u64, u64), ProcessError> {
let mut n_copied = 0;
let mut n_deduped = 0;
// TODO replace with a RecordReader
while !input.fill_buf()?.is_empty() {
let deduped = self.read_record(&mut input, compression)?;
if deduped {
n_deduped += 1;
} else {
n_copied += 1;
}
}
Ok((n_copied, n_deduped))
}
}
#[derive(Debug, PartialEq, Eq)]
enum ProcessOutcome {
Deduplicated,
NeedsCopy,
}
#[derive(Debug, Error)]
pub enum ProcessError {
#[error("record is malformed")]
InvalidRecord(#[from] InvalidRecord),
#[error("input was truncated")]
Truncated,
#[error("I/O error")]
IoError(#[from] std::io::Error),
}
impl From<FinishError> for ProcessError {
fn from(e: FinishError) -> Self {
match e {
FinishError::Io(e) => ProcessError::IoError(e),
FinishError::MissingTail => ProcessError::Truncated,
}
}
}
#[cfg(test)]
mod test;
| 40.43465 | 100 | 0.574682 |
0e3dd38f998ccf7eb41ad368d9cf402cf6b38ea5 | 268 | use yew_router::switch::Permissive;
use yew_router::Switch;
#[derive(Debug, Switch, Clone)]
pub enum AppRoute {
#[to = "/cv"]
Cv,
#[to = "/projects"]
Projects,
#[to = "/not-found"]
NotFound(Permissive<String>),
#[to = "/"]
About,
} | 19.142857 | 37 | 0.559701 |
39cd6ccb1cd05c01ddc9ac095c6fcc075af78281 | 962 | use docchi_compaction::kval_enum::KVal;
use crate::imp::write::store_ids::StoredIDs;
use docchi_compaction::basic_compaction::{comp_int};
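/// Writes a `StoredIDs` variant as a one- or two-bit tag followed by its payload:
/// `Zero` and `U64` lead with a `true` bit, while `Bits` and `Numbers` lead with a
/// `false` bit, a second discriminating bit, and a length-prefixed list of values.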
pub(crate) fn write_stored_ids(s : &StoredIDs, r : &mut Vec<KVal>){
match s {
StoredIDs::Zero => {
r.push(KVal::Bit(true));
r.push(KVal::Bit(false));
},
StoredIDs::U64(b) => {
r.push(KVal::Bit(true));
r.push(comp_int(*b as i64));
},
StoredIDs::Bits(b) => {
r.push(KVal::Bit(false));
r.push(KVal::Bit(true));
r.push(comp_int(b.len() as i64));
for &s in b {
r.push(comp_int(s as i64));
}
},
StoredIDs::Numbers(n) => {
r.push(KVal::Bit(false));
r.push(KVal::Bit(false));
r.push(comp_int(n.len() as i64));
for &s in n {
r.push(comp_int(s as i64));
}
}
}
} | 30.0625 | 67 | 0.469854 |
f7fd56cc69bbae052ad419a08ff694707efcf837 | 647 | use std::fmt;
use std::ops::Deref;
/// `AtomicPtr` wrapper that forwards to `std::sync::atomic::AtomicPtr` via
/// `Deref` and adds a `Debug` implementation.
pub(crate) struct AtomicPtr<T> {
inner: std::sync::atomic::AtomicPtr<T>,
}
impl<T> AtomicPtr<T> {
pub(crate) fn new(ptr: *mut T) -> AtomicPtr<T> {
let inner = std::sync::atomic::AtomicPtr::new(ptr);
AtomicPtr { inner }
}
}
impl<T> Deref for AtomicPtr<T> {
type Target = std::sync::atomic::AtomicPtr<T>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<T> fmt::Debug for AtomicPtr<T> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
self.deref().fmt(fmt)
}
}
| 22.310345 | 64 | 0.591963 |
f9ac8b3e33ff7fca59715da7d9c9a21169201a2c | 2,891 | use crate::data::query::QueryError;
use futures::prelude::*;
use futures::sync::oneshot::Canceled;
use serde::ser::*;
use std::error::Error;
use std::fmt;
/// Errors that can occur while processing incoming requests.
#[derive(Debug)]
pub enum GraphQLServerError {
Canceled(Canceled),
ClientError(String),
QueryError(QueryError),
InternalError(String),
}
impl From<Canceled> for GraphQLServerError {
fn from(e: Canceled) -> Self {
GraphQLServerError::Canceled(e)
}
}
impl From<QueryError> for GraphQLServerError {
fn from(e: QueryError) -> Self {
GraphQLServerError::QueryError(e)
}
}
impl From<&'static str> for GraphQLServerError {
fn from(s: &'static str) -> Self {
GraphQLServerError::InternalError(String::from(s))
}
}
impl From<String> for GraphQLServerError {
fn from(s: String) -> Self {
GraphQLServerError::InternalError(s)
}
}
impl fmt::Display for GraphQLServerError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
GraphQLServerError::Canceled(_) => {
write!(f, "GraphQL server error (query was canceled)")
}
GraphQLServerError::ClientError(ref s) => {
write!(f, "GraphQL server error (client error): {}", s)
}
GraphQLServerError::QueryError(ref e) => {
write!(f, "GraphQL server error (query error): {}", e)
}
GraphQLServerError::InternalError(ref s) => {
write!(f, "GraphQL server error (internal error): {}", s)
}
}
}
}
impl Error for GraphQLServerError {
fn description(&self) -> &str {
"Failed to process the GraphQL request"
}
fn cause(&self) -> Option<&dyn Error> {
match *self {
GraphQLServerError::Canceled(ref e) => Some(e),
GraphQLServerError::ClientError(_) => None,
GraphQLServerError::QueryError(ref e) => Some(e),
GraphQLServerError::InternalError(_) => None,
}
}
}
impl Serialize for GraphQLServerError {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
if let GraphQLServerError::QueryError(ref e) = *self {
serializer.serialize_some(e)
} else {
let mut map = serializer.serialize_map(Some(1))?;
let msg = format!("{}", self);
map.serialize_entry("message", msg.as_str())?;
map.end()
}
}
}
/// Common trait for GraphQL server implementations.
pub trait GraphQLServer {
type ServeError;
/// Creates a new Tokio task that, when spawned, brings up the GraphQL server.
fn serve(
&mut self,
port: u16,
ws_port: u16,
) -> Result<Box<dyn Future<Item = (), Error = ()> + Send>, Self::ServeError>;
}
| 28.343137 | 82 | 0.588378 |
182b366c502ab3f55b9a0e16b58868c4752006e9 | 36,120 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
use std::fmt::Write;
/// See [`CreateChangesetInput`](crate::input::CreateChangesetInput)
pub mod create_changeset_input {
/// A builder for [`CreateChangesetInput`](crate::input::CreateChangesetInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) dataset_id: std::option::Option<std::string::String>,
pub(crate) change_type: std::option::Option<crate::model::ChangeType>,
pub(crate) source_type: std::option::Option<crate::model::SourceType>,
pub(crate) source_params: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
pub(crate) format_type: std::option::Option<crate::model::FormatType>,
pub(crate) format_params: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
pub(crate) tags: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
}
impl Builder {
/// <p>The unique identifier for the FinSpace dataset in which the changeset will be
/// created.</p>
pub fn dataset_id(mut self, input: impl Into<std::string::String>) -> Self {
self.dataset_id = Some(input.into());
self
}
/// <p>The unique identifier for the FinSpace dataset in which the changeset will be
/// created.</p>
pub fn set_dataset_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.dataset_id = input;
self
}
/// <p>Option to indicate how a changeset will be applied to a dataset.</p>
/// <ul>
/// <li>
/// <p>
/// <code>REPLACE</code> - Changeset will be considered as a replacement to all prior
/// loaded changesets.</p>
/// </li>
/// <li>
/// <p>
/// <code>APPEND</code> - Changeset will be considered as an addition to the end of all
/// prior loaded changesets.</p>
/// </li>
/// </ul>
pub fn change_type(mut self, input: crate::model::ChangeType) -> Self {
self.change_type = Some(input);
self
}
/// <p>Option to indicate how a changeset will be applied to a dataset.</p>
/// <ul>
/// <li>
/// <p>
/// <code>REPLACE</code> - Changeset will be considered as a replacement to all prior
/// loaded changesets.</p>
/// </li>
/// <li>
/// <p>
/// <code>APPEND</code> - Changeset will be considered as an addition to the end of all
/// prior loaded changesets.</p>
/// </li>
/// </ul>
pub fn set_change_type(
mut self,
input: std::option::Option<crate::model::ChangeType>,
) -> Self {
self.change_type = input;
self
}
/// <p>Type of the data source from which the files to create the changeset will be
/// sourced.</p>
/// <ul>
/// <li>
/// <p>
/// <code>S3</code> - Amazon S3.</p>
/// </li>
/// </ul>
pub fn source_type(mut self, input: crate::model::SourceType) -> Self {
self.source_type = Some(input);
self
}
/// <p>Type of the data source from which the files to create the changeset will be
/// sourced.</p>
/// <ul>
/// <li>
/// <p>
/// <code>S3</code> - Amazon S3.</p>
/// </li>
/// </ul>
pub fn set_source_type(
mut self,
input: std::option::Option<crate::model::SourceType>,
) -> Self {
self.source_type = input;
self
}
/// Adds a key-value pair to `source_params`.
///
/// To override the contents of this collection use [`set_source_params`](Self::set_source_params).
///
/// <p>Source path from which the files to create the changeset will be sourced.</p>
pub fn source_params(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
let mut hash_map = self.source_params.unwrap_or_default();
hash_map.insert(k.into(), v.into());
self.source_params = Some(hash_map);
self
}
/// <p>Source path from which the files to create the changeset will be sourced.</p>
pub fn set_source_params(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.source_params = input;
self
}
/// <p>Format type of the input files being loaded into the changeset.</p>
pub fn format_type(mut self, input: crate::model::FormatType) -> Self {
self.format_type = Some(input);
self
}
/// <p>Format type of the input files being loaded into the changeset.</p>
pub fn set_format_type(
mut self,
input: std::option::Option<crate::model::FormatType>,
) -> Self {
self.format_type = input;
self
}
/// Adds a key-value pair to `format_params`.
///
/// To override the contents of this collection use [`set_format_params`](Self::set_format_params).
///
/// <p>Options that define the structure of the source file(s).</p>
pub fn format_params(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
let mut hash_map = self.format_params.unwrap_or_default();
hash_map.insert(k.into(), v.into());
self.format_params = Some(hash_map);
self
}
/// <p>Options that define the structure of the source file(s).</p>
pub fn set_format_params(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.format_params = input;
self
}
/// Adds a key-value pair to `tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p>Metadata tags to apply to this changeset.</p>
pub fn tags(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
let mut hash_map = self.tags.unwrap_or_default();
hash_map.insert(k.into(), v.into());
self.tags = Some(hash_map);
self
}
/// <p>Metadata tags to apply to this changeset.</p>
pub fn set_tags(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.tags = input;
self
}
/// Consumes the builder and constructs a [`CreateChangesetInput`](crate::input::CreateChangesetInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::CreateChangesetInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::CreateChangesetInput {
dataset_id: self.dataset_id,
change_type: self.change_type,
source_type: self.source_type,
source_params: self.source_params,
format_type: self.format_type,
format_params: self.format_params,
tags: self.tags,
})
}
}
}
#[doc(hidden)]
pub type CreateChangesetInputOperationOutputAlias = crate::operation::CreateChangeset;
#[doc(hidden)]
pub type CreateChangesetInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl CreateChangesetInput {
/// Consumes the builder and constructs an Operation<[`CreateChangeset`](crate::operation::CreateChangeset)>
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::CreateChangeset,
aws_http::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
fn uri_base(
_input: &crate::input::CreateChangesetInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
let input_1 = &_input.dataset_id;
let input_1 =
input_1
.as_ref()
.ok_or(aws_smithy_http::operation::BuildError::MissingField {
field: "dataset_id",
details: "cannot be empty or unset",
})?;
let dataset_id = aws_smithy_http::label::fmt_string(input_1, false);
if dataset_id.is_empty() {
return Err(aws_smithy_http::operation::BuildError::MissingField {
field: "dataset_id",
details: "cannot be empty or unset",
});
}
write!(
output,
"/datasets/{datasetId}/changesets",
datasetId = dataset_id
)
.expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::CreateChangesetInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
input: &crate::input::CreateChangesetInput,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
#[allow(unused_mut)]
let mut builder = update_http_builder(input, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("content-type"),
"application/json",
);
Ok(builder)
}
let properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
let request = request_builder_base(&self)?;
let body =
crate::operation_ser::serialize_operation_crate_operation_create_changeset(&self)?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = aws_smithy_http::operation::Request::from_parts(
request.map(aws_smithy_http::body::SdkBody::from),
properties,
);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::CreateChangeset::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"CreateChangeset",
"finspacedata",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
Ok(op)
}
fn assemble(
builder: http::request::Builder,
body: aws_smithy_http::body::SdkBody,
) -> http::request::Request<aws_smithy_http::body::SdkBody> {
let mut builder = builder;
if let Some(content_length) = body.content_length() {
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::CONTENT_LENGTH,
content_length,
);
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`CreateChangesetInput`](crate::input::CreateChangesetInput)
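    ///
    /// A minimal sketch — the dataset id and source-parameter key/value below are
    /// illustrative assumptions, not values documented by this crate:
    ///
    /// ```ignore
    /// let input = CreateChangesetInput::builder()
    ///     .dataset_id("example-dataset-id")
    ///     .source_params("s3SourcePath", "s3://example-bucket/data.csv")
    ///     .build()?;
    /// ```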
pub fn builder() -> crate::input::create_changeset_input::Builder {
crate::input::create_changeset_input::Builder::default()
}
}
/// See [`GetProgrammaticAccessCredentialsInput`](crate::input::GetProgrammaticAccessCredentialsInput)
pub mod get_programmatic_access_credentials_input {
/// A builder for [`GetProgrammaticAccessCredentialsInput`](crate::input::GetProgrammaticAccessCredentialsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) duration_in_minutes: std::option::Option<i64>,
pub(crate) environment_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The time duration in which the credentials remain valid. </p>
pub fn duration_in_minutes(mut self, input: i64) -> Self {
self.duration_in_minutes = Some(input);
self
}
/// <p>The time duration in which the credentials remain valid. </p>
pub fn set_duration_in_minutes(mut self, input: std::option::Option<i64>) -> Self {
self.duration_in_minutes = input;
self
}
/// <p>The habanero environment identifier.</p>
pub fn environment_id(mut self, input: impl Into<std::string::String>) -> Self {
self.environment_id = Some(input.into());
self
}
/// <p>The habanero environment identifier.</p>
pub fn set_environment_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.environment_id = input;
self
}
/// Consumes the builder and constructs a [`GetProgrammaticAccessCredentialsInput`](crate::input::GetProgrammaticAccessCredentialsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GetProgrammaticAccessCredentialsInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::GetProgrammaticAccessCredentialsInput {
duration_in_minutes: self.duration_in_minutes.unwrap_or_default(),
environment_id: self.environment_id,
})
}
}
}
#[doc(hidden)]
pub type GetProgrammaticAccessCredentialsInputOperationOutputAlias =
crate::operation::GetProgrammaticAccessCredentials;
#[doc(hidden)]
pub type GetProgrammaticAccessCredentialsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl GetProgrammaticAccessCredentialsInput {
/// Consumes the builder and constructs an Operation<[`GetProgrammaticAccessCredentials`](crate::operation::GetProgrammaticAccessCredentials)>
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::GetProgrammaticAccessCredentials,
aws_http::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
fn uri_base(
_input: &crate::input::GetProgrammaticAccessCredentialsInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/credentials/programmatic").expect("formatting should succeed");
Ok(())
}
fn uri_query(
_input: &crate::input::GetProgrammaticAccessCredentialsInput,
mut output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
let mut query = aws_smithy_http::query::Writer::new(&mut output);
if _input.duration_in_minutes != 0 {
query.push_kv(
"durationInMinutes",
aws_smithy_types::primitive::Encoder::from(_input.duration_in_minutes).encode(),
);
}
if let Some(inner_2) = &_input.environment_id {
query.push_kv(
"environmentId",
&aws_smithy_http::query::fmt_string(&inner_2),
);
}
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::GetProgrammaticAccessCredentialsInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
uri_query(input, &mut uri)?;
Ok(builder.method("GET").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
input: &crate::input::GetProgrammaticAccessCredentialsInput,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
#[allow(unused_mut)]
let mut builder = update_http_builder(input, http::request::Builder::new())?;
Ok(builder)
}
let properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
let request = request_builder_base(&self)?;
let body = aws_smithy_http::body::SdkBody::from("");
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = aws_smithy_http::operation::Request::from_parts(
request.map(aws_smithy_http::body::SdkBody::from),
properties,
);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::GetProgrammaticAccessCredentials::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"GetProgrammaticAccessCredentials",
"finspacedata",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
Ok(op)
}
fn assemble(
builder: http::request::Builder,
body: aws_smithy_http::body::SdkBody,
) -> http::request::Request<aws_smithy_http::body::SdkBody> {
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`GetProgrammaticAccessCredentialsInput`](crate::input::GetProgrammaticAccessCredentialsInput)
pub fn builder() -> crate::input::get_programmatic_access_credentials_input::Builder {
crate::input::get_programmatic_access_credentials_input::Builder::default()
}
}
/// See [`GetWorkingLocationInput`](crate::input::GetWorkingLocationInput)
pub mod get_working_location_input {
/// A builder for [`GetWorkingLocationInput`](crate::input::GetWorkingLocationInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) location_type: std::option::Option<crate::model::LocationType>,
}
impl Builder {
/// <p>Specify the type of the working location.</p>
/// <ul>
/// <li>
/// <p>
/// <code>SAGEMAKER</code> - Use the Amazon S3 location as a temporary location to store data content when
/// working with FinSpace Notebooks that run on SageMaker studio.</p>
/// </li>
/// <li>
/// <p>
/// <code>INGESTION</code> - Use the Amazon S3 location as a staging location to copy your
/// data content and then use the location with the changeset creation operation.</p>
/// </li>
/// </ul>
pub fn location_type(mut self, input: crate::model::LocationType) -> Self {
self.location_type = Some(input);
self
}
/// <p>Specify the type of the working location.</p>
/// <ul>
/// <li>
/// <p>
/// <code>SAGEMAKER</code> - Use the Amazon S3 location as a temporary location to store data content when
/// working with FinSpace Notebooks that run on SageMaker studio.</p>
/// </li>
/// <li>
/// <p>
/// <code>INGESTION</code> - Use the Amazon S3 location as a staging location to copy your
/// data content and then use the location with the changeset creation operation.</p>
/// </li>
/// </ul>
pub fn set_location_type(
mut self,
input: std::option::Option<crate::model::LocationType>,
) -> Self {
self.location_type = input;
self
}
/// Consumes the builder and constructs a [`GetWorkingLocationInput`](crate::input::GetWorkingLocationInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GetWorkingLocationInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::GetWorkingLocationInput {
location_type: self.location_type,
})
}
}
}
#[doc(hidden)]
pub type GetWorkingLocationInputOperationOutputAlias = crate::operation::GetWorkingLocation;
#[doc(hidden)]
pub type GetWorkingLocationInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl GetWorkingLocationInput {
/// Consumes the builder and constructs an Operation<[`GetWorkingLocation`](crate::operation::GetWorkingLocation)>
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::GetWorkingLocation,
aws_http::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
fn uri_base(
_input: &crate::input::GetWorkingLocationInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/workingLocationV1").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::GetWorkingLocationInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
input: &crate::input::GetWorkingLocationInput,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
#[allow(unused_mut)]
let mut builder = update_http_builder(input, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("content-type"),
"application/json",
);
Ok(builder)
}
let properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
let request = request_builder_base(&self)?;
let body =
crate::operation_ser::serialize_operation_crate_operation_get_working_location(&self)?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = aws_smithy_http::operation::Request::from_parts(
request.map(aws_smithy_http::body::SdkBody::from),
properties,
);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::GetWorkingLocation::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"GetWorkingLocation",
"finspacedata",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
Ok(op)
}
fn assemble(
builder: http::request::Builder,
body: aws_smithy_http::body::SdkBody,
) -> http::request::Request<aws_smithy_http::body::SdkBody> {
let mut builder = builder;
if let Some(content_length) = body.content_length() {
builder = aws_smithy_http::header::set_header_if_absent(
builder,
http::header::CONTENT_LENGTH,
content_length,
);
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`GetWorkingLocationInput`](crate::input::GetWorkingLocationInput)
pub fn builder() -> crate::input::get_working_location_input::Builder {
crate::input::get_working_location_input::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetWorkingLocationInput {
/// <p>Specify the type of the working location.</p>
/// <ul>
/// <li>
/// <p>
/// <code>SAGEMAKER</code> - Use the Amazon S3 location as a temporary location to store data content when
/// working with FinSpace Notebooks that run on SageMaker studio.</p>
/// </li>
/// <li>
/// <p>
/// <code>INGESTION</code> - Use the Amazon S3 location as a staging location to copy your
/// data content and then use the location with the changeset creation operation.</p>
/// </li>
/// </ul>
pub location_type: std::option::Option<crate::model::LocationType>,
}
impl GetWorkingLocationInput {
/// <p>Specify the type of the working location.</p>
/// <ul>
/// <li>
/// <p>
/// <code>SAGEMAKER</code> - Use the Amazon S3 location as a temporary location to store data content when
/// working with FinSpace Notebooks that run on SageMaker studio.</p>
/// </li>
/// <li>
/// <p>
/// <code>INGESTION</code> - Use the Amazon S3 location as a staging location to copy your
/// data content and then use the location with the changeset creation operation.</p>
/// </li>
/// </ul>
pub fn location_type(&self) -> std::option::Option<&crate::model::LocationType> {
self.location_type.as_ref()
}
}
impl std::fmt::Debug for GetWorkingLocationInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetWorkingLocationInput");
formatter.field("location_type", &self.location_type);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetProgrammaticAccessCredentialsInput {
/// <p>The time duration in which the credentials remain valid. </p>
pub duration_in_minutes: i64,
/// <p>The habanero environment identifier.</p>
pub environment_id: std::option::Option<std::string::String>,
}
impl GetProgrammaticAccessCredentialsInput {
/// <p>The time duration in which the credentials remain valid. </p>
pub fn duration_in_minutes(&self) -> i64 {
self.duration_in_minutes
}
/// <p>The habanero environment identifier.</p>
pub fn environment_id(&self) -> std::option::Option<&str> {
self.environment_id.as_deref()
}
}
impl std::fmt::Debug for GetProgrammaticAccessCredentialsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetProgrammaticAccessCredentialsInput");
formatter.field("duration_in_minutes", &self.duration_in_minutes);
formatter.field("environment_id", &self.environment_id);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateChangesetInput {
/// <p>The unique identifier for the FinSpace dataset in which the changeset will be
/// created.</p>
pub dataset_id: std::option::Option<std::string::String>,
/// <p>Option to indicate how a changeset will be applied to a dataset.</p>
/// <ul>
/// <li>
/// <p>
/// <code>REPLACE</code> - Changeset will be considered as a replacement to all prior
/// loaded changesets.</p>
/// </li>
/// <li>
/// <p>
/// <code>APPEND</code> - Changeset will be considered as an addition to the end of all
/// prior loaded changesets.</p>
/// </li>
/// </ul>
pub change_type: std::option::Option<crate::model::ChangeType>,
/// <p>Type of the data source from which the files to create the changeset will be
/// sourced.</p>
/// <ul>
/// <li>
/// <p>
/// <code>S3</code> - Amazon S3.</p>
/// </li>
/// </ul>
pub source_type: std::option::Option<crate::model::SourceType>,
/// <p>Source path from which the files to create the changeset will be sourced.</p>
pub source_params:
std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
/// <p>Format type of the input files being loaded into the changeset.</p>
pub format_type: std::option::Option<crate::model::FormatType>,
/// <p>Options that define the structure of the source file(s).</p>
pub format_params:
std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
/// <p>Metadata tags to apply to this changeset.</p>
pub tags:
std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
}
impl CreateChangesetInput {
/// <p>The unique identifier for the FinSpace dataset in which the changeset will be
/// created.</p>
pub fn dataset_id(&self) -> std::option::Option<&str> {
self.dataset_id.as_deref()
}
/// <p>Option to indicate how a changeset will be applied to a dataset.</p>
/// <ul>
/// <li>
/// <p>
/// <code>REPLACE</code> - Changeset will be considered as a replacement to all prior
/// loaded changesets.</p>
/// </li>
/// <li>
/// <p>
/// <code>APPEND</code> - Changeset will be considered as an addition to the end of all
/// prior loaded changesets.</p>
/// </li>
/// </ul>
pub fn change_type(&self) -> std::option::Option<&crate::model::ChangeType> {
self.change_type.as_ref()
}
/// <p>Type of the data source from which the files to create the changeset will be
/// sourced.</p>
/// <ul>
/// <li>
/// <p>
/// <code>S3</code> - Amazon S3.</p>
/// </li>
/// </ul>
pub fn source_type(&self) -> std::option::Option<&crate::model::SourceType> {
self.source_type.as_ref()
}
/// <p>Source path from which the files to create the changeset will be sourced.</p>
pub fn source_params(
&self,
) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
{
self.source_params.as_ref()
}
/// <p>Format type of the input files being loaded into the changeset.</p>
pub fn format_type(&self) -> std::option::Option<&crate::model::FormatType> {
self.format_type.as_ref()
}
/// <p>Options that define the structure of the source file(s).</p>
pub fn format_params(
&self,
) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
{
self.format_params.as_ref()
}
/// <p>Metadata tags to apply to this changeset.</p>
pub fn tags(
&self,
) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
{
self.tags.as_ref()
}
}
impl std::fmt::Debug for CreateChangesetInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CreateChangesetInput");
formatter.field("dataset_id", &self.dataset_id);
formatter.field("change_type", &self.change_type);
formatter.field("source_type", &self.source_type);
formatter.field("source_params", &self.source_params);
formatter.field("format_type", &self.format_type);
formatter.field("format_params", &self.format_params);
formatter.field("tags", &self.tags);
formatter.finish()
}
}
| 41.138952 | 152 | 0.593051 |
16f471cf6d25cb18dd2dfb0594e8a9b60540ed82 | 1,728 | extern crate gl;
extern crate glfw;
use glfw::{Glfw, Context};
use gl::types::{GLubyte};
use std::string::String;
use std::ffi::CStr;
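// Converts a C string returned by OpenGL (e.g. by `gl::GetString`) into an owned
// Rust `String`. The pointer must be non-null and NUL-terminated; the "safe" in
// the name refers to the lossy UTF-8 conversion, while the pointer dereference
// itself relies on that invariant.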
fn glubyte_ptr_to_string_safe(cstr: *const GLubyte) -> String {
unsafe {
CStr::from_ptr(cstr as *const i8).to_string_lossy().into_owned()
}
}
fn main() {
// start GL context and O/S window using the GLFW helper library
let mut glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap();
// uncomment these lines if on Apple OS X.
// glfwWindowHint (GLFW_CONTEXT_VERSION_MAJOR, 3);
// glfwWindowHint (GLFW_CONTEXT_VERSION_MINOR, 2);
// glfwWindowHint (GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
// glfwWindowHint (GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
    let (mut window, _events) = glfw.create_window(640, 480, "Hello Triangle", glfw::WindowMode::Windowed)
.expect("Failed to create GLFW window.");
window.make_current();
// Load the OpenGl function pointers.
gl::load_with(|symbol| { window.get_proc_address(symbol) as *const _ });
// get version info
let renderer = glubyte_ptr_to_string_safe(
unsafe { gl::GetString(gl::RENDERER) }
);
let version = glubyte_ptr_to_string_safe(
unsafe { gl::GetString(gl::VERSION) }
);
println!("Renderer: {}", renderer);
println!("OpenGL version supported {}", version);
// tell GL to only draw onto a pixel if the shape is closer to the viewer
unsafe {
gl::Enable(gl::DEPTH_TEST); // enable depth-testing
gl::DepthFunc(gl::LESS); // depth-testing interprets a smaller value as "closer"
}
/* OTHER STUFF GOES HERE NEXT */
// close GL context and any other GLFW resources
//glfw::ffi::GlfwTerminate();
}
| 31.418182 | 105 | 0.670718 |
61d7edcf9dc759d85f2436caa59678a3763c17dc | 26,993 | use convert_case::{Case, Casing};
use core::convert::TryFrom;
use proc_macro2::TokenStream;
use syn::{
parse::Error, spanned::Spanned as _, AttrStyle, Data, DeriveInput, Expr, Fields,
GenericArgument, Lit, NestedMeta, PathArguments, Type,
};
use crate::{
abi,
abi::{ParamType, Token},
types::{Address, H160, U256},
utils::keccak256,
};
/// Pre-computed value of the following statement:
///
/// `ethers_core::utils::keccak256("EIP712Domain(string name,string version,uint256 chainId,address
/// verifyingContract)")`
pub const EIP712_DOMAIN_TYPE_HASH: [u8; 32] = [
139, 115, 195, 198, 155, 184, 254, 61, 81, 46, 204, 76, 247, 89, 204, 121, 35, 159, 123, 23,
155, 15, 250, 202, 169, 167, 93, 82, 43, 57, 64, 15,
];
/// Pre-computed value of the following statement:
///
/// `ethers_core::utils::keccak256("EIP712Domain(string name,string version,uint256 chainId,address
/// verifyingContract,bytes32 salt)")`
pub const EIP712_DOMAIN_TYPE_HASH_WITH_SALT: [u8; 32] = [
216, 124, 214, 239, 121, 212, 226, 185, 94, 21, 206, 138, 191, 115, 45, 181, 30, 199, 113, 241,
202, 46, 220, 207, 34, 164, 108, 114, 154, 197, 100, 114,
];
/// Error typed used by Eip712 derive macro
#[derive(Debug, thiserror::Error)]
pub enum Eip712Error {
#[error("Failed to serialize serde JSON object")]
SerdeJsonError(#[from] serde_json::Error),
#[error("Failed to decode hex value")]
FromHexError(#[from] hex::FromHexError),
#[error("Failed to make struct hash from values")]
FailedToEncodeStruct,
#[error("Failed to convert slice into byte array")]
TryFromSliceError(#[from] std::array::TryFromSliceError),
#[error("Nested Eip712 struct not implemented. Failed to parse.")]
NestedEip712StructNotImplemented,
#[error("Error from Eip712 struct: {0:?}")]
Inner(String),
}
/// The Eip712 trait provides helper methods for computing
/// the typed data hash used in `eth_signTypedData`.
///
/// The ethers-rs `derive_eip712` crate provides a derive macro to
/// implement the trait for a given struct. See documentation
/// for `derive_eip712` for more information and example usage.
///
/// For those who wish to manually implement this trait, see:
/// <https://eips.ethereum.org/EIPS/eip-712>
///
/// Any rust struct implementing Eip712 must also have a corresponding
/// struct in the verifying ethereum contract that matches its signature.
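///
/// A hand-rolled sketch of an implementation (most users should prefer the
/// `derive_eip712` macro; the `Mail` struct and its domain values below are
/// illustrative placeholders):
///
/// ```ignore
/// struct Mail { to: Address, contents: String }
///
/// impl Eip712 for Mail {
///     type Error = Eip712Error;
///
///     fn domain(&self) -> Result<EIP712Domain, Self::Error> {
///         Ok(EIP712Domain {
///             name: "Mail".to_string(),
///             version: "1".to_string(),
///             chain_id: U256::from(1u64),
///             verifying_contract: Address::zero(),
///             salt: None,
///         })
///     }
///
///     fn type_hash() -> Result<[u8; 32], Self::Error> {
///         Ok(keccak256("Mail(address to,string contents)"))
///     }
///
///     fn struct_hash(&self) -> Result<[u8; 32], Self::Error> {
///         Ok(keccak256(abi::encode(&[
///             Token::Uint(U256::from(Self::type_hash()?)),
///             Token::Address(self.to),
///             Token::Uint(U256::from(keccak256(&self.contents))),
///         ])))
///     }
/// }
/// ```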
pub trait Eip712 {
    /// User-defined error type.
type Error: std::error::Error + Send + Sync + std::fmt::Debug;
/// Default implementation of the domain separator;
fn domain_separator(&self) -> Result<[u8; 32], Self::Error> {
Ok(self.domain()?.separator())
}
/// Returns the current domain. The domain depends on the contract and unique domain
/// for which the user is targeting. In the derive macro, these attributes
/// are passed in as arguments to the macro. When manually deriving, the user
/// will need to know the name of the domain, version of the contract, chain ID of
/// where the contract lives and the address of the verifying contract.
fn domain(&self) -> Result<EIP712Domain, Self::Error>;
/// This method is used for calculating the hash of the type signature of the
/// struct. The field types of the struct must map to primitive
/// ethereum types or custom types defined in the contract.
fn type_hash() -> Result<[u8; 32], Self::Error>;
/// Hash of the struct, according to EIP-712 definition of `hashStruct`
fn struct_hash(&self) -> Result<[u8; 32], Self::Error>;
/// When using the derive macro, this is the primary method used for computing the final
/// EIP-712 encoded payload. This method relies on the aforementioned methods for computing
/// the final encoded payload.
fn encode_eip712(&self) -> Result<[u8; 32], Self::Error> {
// encode the digest to be compatible with solidity abi.encodePacked()
// See: https://github.com/gakonst/ethers-rs/blob/master/examples/permit_hash.rs#L72
let domain_separator = self.domain_separator()?;
let struct_hash = self.struct_hash()?;
let digest_input = [&[0x19, 0x01], &domain_separator[..], &struct_hash[..]].concat();
Ok(keccak256(digest_input))
}
}
/// Eip712 Domain attributes used in determining the domain separator;
/// Unused fields are left out of the struct type.
#[derive(Debug, Default, Clone, serde::Serialize, serde::Deserialize)]
pub struct EIP712Domain {
/// The user readable name of signing domain, i.e. the name of the DApp or the protocol.
pub name: String,
/// The current major version of the signing domain. Signatures from different versions are not
/// compatible.
pub version: String,
/// The EIP-155 chain id. The user-agent should refuse signing if it does not match the
/// currently active chain.
pub chain_id: U256,
/// The address of the contract that will verify the signature.
pub verifying_contract: Address,
/// A disambiguating salt for the protocol. This can be used as a domain separator of last
/// resort.
pub salt: Option<[u8; 32]>,
}
impl EIP712Domain {
// Compute the domain separator;
// See: https://github.com/gakonst/ethers-rs/blob/master/examples/permit_hash.rs#L41
pub fn separator(&self) -> [u8; 32] {
let domain_type_hash = if self.salt.is_some() {
EIP712_DOMAIN_TYPE_HASH_WITH_SALT
} else {
EIP712_DOMAIN_TYPE_HASH
};
let mut tokens = vec![
Token::Uint(U256::from(domain_type_hash)),
Token::Uint(U256::from(keccak256(&self.name))),
Token::Uint(U256::from(keccak256(&self.version))),
Token::Uint(self.chain_id),
Token::Address(self.verifying_contract),
];
// Add the salt to the struct to be hashed if it exists;
if let Some(salt) = &self.salt {
tokens.push(Token::Uint(U256::from(salt)));
}
keccak256(abi::encode(&tokens))
}
}
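/// A wrapper that pairs an inner `Eip712` type with an externally supplied
/// domain, overriding the inner type's own `domain()`.
///
/// A usage sketch (assuming `inner` implements `Eip712 + Clone` and
/// `custom_domain` is an `EIP712Domain`):
///
/// ```ignore
/// let wrapped = EIP712WithDomain::new(inner)?.set_domain(custom_domain);
/// let digest = wrapped.encode_eip712()?;
/// ```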
#[derive(Debug, Clone)]
pub struct EIP712WithDomain<T>
where
T: Clone + Eip712,
{
pub domain: EIP712Domain,
pub inner: T,
}
impl<T: Eip712 + Clone> EIP712WithDomain<T> {
pub fn new(inner: T) -> Result<Self, Eip712Error> {
let domain = inner.domain().map_err(|e| Eip712Error::Inner(e.to_string()))?;
Ok(Self { domain, inner })
}
#[must_use]
pub fn set_domain(self, domain: EIP712Domain) -> Self {
Self { domain, inner: self.inner }
}
}
impl<T: Eip712 + Clone> Eip712 for EIP712WithDomain<T> {
type Error = Eip712Error;
fn domain(&self) -> Result<EIP712Domain, Self::Error> {
Ok(self.domain.clone())
}
fn type_hash() -> Result<[u8; 32], Self::Error> {
let type_hash = T::type_hash().map_err(|e| Self::Error::Inner(e.to_string()))?;
Ok(type_hash)
}
fn struct_hash(&self) -> Result<[u8; 32], Self::Error> {
let struct_hash =
self.inner.clone().struct_hash().map_err(|e| Self::Error::Inner(e.to_string()))?;
Ok(struct_hash)
}
}
// Parse the AST of the struct to determine the domain attributes
impl TryFrom<&syn::DeriveInput> for EIP712Domain {
type Error = TokenStream;
fn try_from(input: &syn::DeriveInput) -> Result<EIP712Domain, Self::Error> {
let mut domain = EIP712Domain::default();
let mut found_eip712_attribute = false;
for attribute in input.attrs.iter() {
if let AttrStyle::Outer = attribute.style {
if let Ok(syn::Meta::List(meta)) = attribute.parse_meta() {
if meta.path.is_ident("eip712") {
found_eip712_attribute = true;
for n in meta.nested.iter() {
if let NestedMeta::Meta(meta) = n {
match meta {
syn::Meta::NameValue(meta) => {
let ident = meta.path.get_ident().ok_or_else(|| {
Error::new(
meta.path.span(),
"unrecognized eip712 parameter",
)
.to_compile_error()
})?;
match ident.to_string().as_ref() {
"name" => match meta.lit {
syn::Lit::Str(ref lit_str) => {
if domain.name != String::default() {
return Err(Error::new(
meta.path.span(),
"domain name already specified",
)
.to_compile_error())
}
domain.name = lit_str.value();
}
_ => {
return Err(Error::new(
meta.path.span(),
"domain name must be a string",
)
.to_compile_error())
}
},
"version" => match meta.lit {
syn::Lit::Str(ref lit_str) => {
if domain.version != String::default() {
return Err(Error::new(
meta.path.span(),
"domain version already specified",
)
.to_compile_error())
}
domain.version = lit_str.value();
}
_ => {
return Err(Error::new(
meta.path.span(),
"domain version must be a string",
)
.to_compile_error())
}
},
"chain_id" => match meta.lit {
syn::Lit::Int(ref lit_int) => {
if domain.chain_id != U256::default() {
return Err(Error::new(
meta.path.span(),
"domain chain_id already specified",
)
.to_compile_error())
}
domain.chain_id = U256::from(
lit_int.base10_parse::<u64>().map_err(
|_| {
Error::new(
meta.path.span(),
"failed to parse chain id",
)
.to_compile_error()
},
)?,
);
}
_ => {
return Err(Error::new(
meta.path.span(),
"domain chain_id must be a positive integer",
)
.to_compile_error());
}
},
"verifying_contract" => match meta.lit {
syn::Lit::Str(ref lit_str) => {
if domain.verifying_contract != H160::default()
{
return Err(Error::new(
meta.path.span(),
"domain verifying_contract already specified",
)
.to_compile_error());
}
domain.verifying_contract = lit_str.value().parse().map_err(|_| {
Error::new(
meta.path.span(),
"failed to parse verifying contract into Address",
)
.to_compile_error()
})?;
}
_ => {
return Err(Error::new(
meta.path.span(),
"domain verifying_contract must be a string",
)
.to_compile_error());
}
},
"salt" => match meta.lit {
syn::Lit::Str(ref lit_str) => {
if domain.salt != Option::None {
return Err(Error::new(
meta.path.span(),
"domain salt already specified",
)
.to_compile_error())
}
// keccak256(<string>) to compute bytes32
// encoded domain salt
let salt = keccak256(lit_str.value());
domain.salt = Some(salt);
}
_ => {
return Err(Error::new(
meta.path.span(),
"domain salt must be a string",
)
.to_compile_error())
}
},
_ => {
return Err(Error::new(
meta.path.span(),
"unrecognized eip712 parameter; must be one of 'name', 'version', 'chain_id', or 'verifying_contract'",
)
.to_compile_error());
}
}
}
syn::Meta::Path(path) => {
return Err(Error::new(
path.span(),
"unrecognized eip712 parameter",
)
.to_compile_error())
}
syn::Meta::List(meta) => {
return Err(Error::new(
meta.path.span(),
"unrecognized eip712 parameter",
)
.to_compile_error())
}
}
}
}
if domain.name == String::default() {
return Err(Error::new(
meta.path.span(),
"missing required domain attribute: 'name'".to_string(),
)
.to_compile_error())
}
if domain.version == String::default() {
return Err(Error::new(
meta.path.span(),
"missing required domain attribute: 'version'".to_string(),
)
.to_compile_error())
}
if domain.chain_id == U256::default() {
return Err(Error::new(
meta.path.span(),
"missing required domain attribute: 'chain_id'".to_string(),
)
.to_compile_error())
}
if domain.verifying_contract == H160::default() {
return Err(Error::new(
meta.path.span(),
"missing required domain attribute: 'verifying_contract'"
.to_string(),
)
.to_compile_error())
}
}
}
}
}
if !found_eip712_attribute {
return Err(Error::new_spanned(
input,
"missing required derive attribute: '#[eip712( ... )]'".to_string(),
)
.to_compile_error())
}
Ok(domain)
}
}
/// Parse the eth abi parameter type based on the syntax type;
/// this method is copied from <https://github.com/gakonst/ethers-rs/blob/master/ethers-contract/ethers-contract-derive/src/lib.rs#L600>
/// with additional modifications for finding byte arrays
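///
/// e.g. `[u8; 32]` maps to `ParamType::FixedBytes(32)`, `Vec<u8>` to
/// `ParamType::Bytes`, and `Vec<Address>` to `ParamType::Array(Address)`.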
pub fn find_parameter_type(ty: &Type) -> Result<ParamType, TokenStream> {
match ty {
Type::Array(ty) => {
let param = find_parameter_type(ty.elem.as_ref())?;
if let Expr::Lit(ref expr) = ty.len {
if let Lit::Int(ref len) = expr.lit {
if let Ok(size) = len.base10_parse::<usize>() {
if let ParamType::Uint(_) = param {
return Ok(ParamType::FixedBytes(size))
}
return Ok(ParamType::FixedArray(Box::new(param), size))
}
}
}
Err(Error::new(ty.span(), "Failed to derive proper ABI from array field")
.to_compile_error())
}
Type::Path(ty) => {
if let Some(ident) = ty.path.get_ident() {
let ident = ident.to_string().to_lowercase();
return match ident.as_str() {
"address" => Ok(ParamType::Address),
"string" => Ok(ParamType::String),
"bool" => Ok(ParamType::Bool),
"int256" | "int" | "uint" | "uint256" => Ok(ParamType::Uint(256)),
"h160" => Ok(ParamType::FixedBytes(20)),
"h256" | "secret" | "hash" => Ok(ParamType::FixedBytes(32)),
"h512" | "public" => Ok(ParamType::FixedBytes(64)),
"bytes" => Ok(ParamType::Bytes),
s => parse_int_param_type(s).ok_or_else(|| {
Error::new(
ty.span(),
format!("Failed to derive proper ABI from field: {})", s),
)
.to_compile_error()
}),
}
}
// check for `Vec`
if ty.path.segments.len() == 1 && ty.path.segments[0].ident == "Vec" {
if let PathArguments::AngleBracketed(ref args) = ty.path.segments[0].arguments {
if args.args.len() == 1 {
if let GenericArgument::Type(ref ty) = args.args.iter().next().unwrap() {
let kind = find_parameter_type(ty)?;
// Check if byte array is found
if let ParamType::Uint(size) = kind {
if size == 8 {
return Ok(ParamType::Bytes)
}
}
return Ok(ParamType::Array(Box::new(kind)))
}
}
}
}
Err(Error::new(ty.span(), "Failed to derive proper ABI from fields").to_compile_error())
}
Type::Tuple(ty) => {
let params = ty.elems.iter().map(find_parameter_type).collect::<Result<Vec<_>, _>>()?;
Ok(ParamType::Tuple(params))
}
_ => {
Err(Error::new(ty.span(), "Failed to derive proper ABI from fields").to_compile_error())
}
}
}
fn parse_int_param_type(s: &str) -> Option<ParamType> {
let size = s.chars().skip(1).collect::<String>().parse::<usize>().ok()?;
if s.starts_with('u') {
Some(ParamType::Uint(size))
} else if s.starts_with('i') {
Some(ParamType::Int(size))
} else {
None
}
}
/// Return a list of `(field name, field type)` pairs for the struct's named fields;
pub fn parse_fields(ast: &DeriveInput) -> Result<Vec<(String, ParamType)>, TokenStream> {
let mut fields = Vec::new();
let data = match &ast.data {
Data::Struct(s) => s,
_ => {
return Err(Error::new(
ast.span(),
"invalid data type. can only derive Eip712 for a struct",
)
.to_compile_error())
}
};
let named_fields = match &data.fields {
Fields::Named(name) => name,
_ => {
return Err(Error::new(ast.span(), "unnamed fields are not supported").to_compile_error())
}
};
for f in named_fields.named.iter() {
let field_name =
f.ident.clone().map(|i| i.to_string().to_case(Case::Camel)).ok_or_else(|| {
Error::new(named_fields.span(), "fields must be named").to_compile_error()
})?;
let field_type =
match f.attrs.iter().find(|a| a.path.segments.iter().any(|s| s.ident == "eip712")) {
// Found nested Eip712 Struct
// TODO: Implement custom
Some(a) => {
return Err(Error::new(a.span(), "nested Eip712 struct are not yet supported")
.to_compile_error())
}
// Not a nested eip712 struct, return the field param type;
None => find_parameter_type(&f.ty)?,
};
fields.push((field_name, field_type));
}
Ok(fields)
}
/// Convert a list of field names and types into the type hash of the
/// corresponding encoded type signature;
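///
/// For example (hypothetical `Mail` type), the fields `from: address` and
/// `contents: string` hash the signature `Mail(address from,string contents)`:
///
/// ```ignore
/// let fields = vec![
///     ("from".to_string(), ParamType::Address),
///     ("contents".to_string(), ParamType::String),
/// ];
/// let hash = make_type_hash("Mail".to_string(), &fields);
/// assert_eq!(hash, keccak256("Mail(address from,string contents)"));
/// ```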
pub fn make_type_hash(primary_type: String, fields: &[(String, ParamType)]) -> [u8; 32] {
let parameters =
fields.iter().map(|(k, v)| format!("{} {}", v, k)).collect::<Vec<String>>().join(",");
let sig = format!("{}({})", primary_type, parameters);
keccak256(sig)
}
/// Convert a token into its EIP-712 compliant ABI encoding.
/// NOTE: Token::Tuple() is currently not supported for solidity structs;
/// it is needed for nested Eip712 types, but is not implemented.
pub fn encode_eip712_type(token: Token) -> Token {
match token {
Token::Bytes(t) => Token::Uint(U256::from(keccak256(t))),
Token::FixedBytes(t) => Token::Uint(U256::from(&t[..])),
Token::String(t) => Token::Uint(U256::from(keccak256(t))),
Token::Bool(t) => {
// Boolean false and true are encoded as uint256 values 0 and 1 respectively
Token::Uint(U256::from(t as i32))
}
Token::Int(t) => {
// Integer values are sign-extended to 256-bit and encoded in big endian order.
Token::Uint(t)
}
Token::Array(tokens) => Token::Uint(U256::from(keccak256(abi::encode(
&tokens.into_iter().map(encode_eip712_type).collect::<Vec<Token>>(),
)))),
Token::FixedArray(tokens) => Token::Uint(U256::from(keccak256(abi::encode(
&tokens.into_iter().map(encode_eip712_type).collect::<Vec<Token>>(),
)))),
_ => {
// Return the ABI encoded token;
token
}
}
}
| 46.300172 | 155 | 0.411292 |
5bc4363594e98c21c144d15320e541d1087aba82 | 6,470 | use crate::{client::CLIENT_ID, prelude::*};
use neon::prelude::*;
use stencila::documents::{self, DOCUMENTS};
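// These bindings are meant to be registered with neon via `cx.export_function`
// in the addon's entry point (not shown here). A hypothetical JS-side usage
// sketch, assuming the functions are exported under camelCase names:
//
//     const doc = JSON.parse(addon.documentsOpen("report.md", ""));
//     addon.documentsWrite(doc.id, "# Hello", "md");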
/// Get the module's schemas
pub fn schemas(cx: FunctionContext) -> JsResult<JsString> {
let schemas = documents::schemas();
to_json_or_throw(cx, schemas)
}
/// List documents
pub fn list(cx: FunctionContext) -> JsResult<JsString> {
let result = RUNTIME.block_on(async { DOCUMENTS.list().await });
to_json_or_throw(cx, result)
}
/// Create a document
pub fn create(mut cx: FunctionContext) -> JsResult<JsString> {
let path = not_empty_or_none(&cx.argument::<JsString>(0)?.value(&mut cx));
let format = not_empty_or_none(&cx.argument::<JsString>(1)?.value(&mut cx));
let result = RUNTIME.block_on(async { DOCUMENTS.create(path, format).await });
to_json_or_throw(cx, result)
}
/// Open a document
pub fn open(mut cx: FunctionContext) -> JsResult<JsString> {
let path = &cx.argument::<JsString>(0)?.value(&mut cx);
let format = not_empty_or_none(&cx.argument::<JsString>(1)?.value(&mut cx));
let result = RUNTIME.block_on(async { DOCUMENTS.open(path, format).await });
to_json_or_throw(cx, result)
}
/// Get a document
pub fn get(mut cx: FunctionContext) -> JsResult<JsString> {
let id = &cx.argument::<JsString>(0)?.value(&mut cx);
let result = RUNTIME.block_on(async {
match DOCUMENTS.get(id).await {
Ok(document) => {
let document = &mut *document.lock().await;
Ok(document.clone())
}
Err(error) => Err(error),
}
});
to_json_or_throw(cx, result)
}
/// Alter a document's properties
pub fn alter(mut cx: FunctionContext) -> JsResult<JsString> {
let id = &cx.argument::<JsString>(0)?.value(&mut cx);
let path = not_empty_or_none(&cx.argument::<JsString>(1)?.value(&mut cx));
let format = not_empty_or_none(&cx.argument::<JsString>(2)?.value(&mut cx));
let result = RUNTIME.block_on(async {
match DOCUMENTS.get(id).await {
Ok(document) => {
let document = &mut *document.lock().await;
document.alter(path, format).await?;
Ok(document.clone())
}
Err(error) => Err(error),
}
});
to_json_or_throw(cx, result)
}
/// Read a document
pub fn read(mut cx: FunctionContext) -> JsResult<JsString> {
let id = &cx.argument::<JsString>(0)?.value(&mut cx);
let result = RUNTIME.block_on(async {
match DOCUMENTS.get(id).await {
Ok(document) => document.lock().await.read().await,
Err(error) => Err(error),
}
});
to_string_or_throw(cx, result)
}
/// Write a document
pub fn write(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let id = &cx.argument::<JsString>(0)?.value(&mut cx);
let content = cx.argument::<JsString>(1)?.value(&mut cx);
let format = not_empty_or_none(&cx.argument::<JsString>(2)?.value(&mut cx));
let result = RUNTIME.block_on(async {
match DOCUMENTS.get(id).await {
Ok(document) => document.lock().await.write(Some(content), format).await,
Err(error) => Err(error),
}
});
to_undefined_or_throw(cx, result)
}
/// Write a document to another path / format
pub fn write_as(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let id = &cx.argument::<JsString>(0)?.value(&mut cx);
let path = cx.argument::<JsString>(1)?.value(&mut cx);
let format = cx.argument::<JsString>(2)?.value(&mut cx);
let format = if format.is_empty() {
None
} else {
Some(format)
};
let theme = cx.argument::<JsString>(3)?.value(&mut cx);
let theme = if theme.is_empty() { None } else { Some(theme) };
let result = RUNTIME.block_on(async {
match DOCUMENTS.get(id).await {
Ok(document) => document.lock().await.write_as(path, format, theme).await,
Err(error) => Err(error),
}
});
to_undefined_or_throw(cx, result)
}
/// Dump a document
pub fn dump(mut cx: FunctionContext) -> JsResult<JsString> {
let id = &cx.argument::<JsString>(0)?.value(&mut cx);
let format = cx.argument::<JsString>(1)?.value(&mut cx);
let format = if format.is_empty() {
None
} else {
Some(format)
};
let result = RUNTIME.block_on(async {
match DOCUMENTS.get(id).await {
Ok(document) => document.lock().await.dump(format).await,
Err(error) => Err(error),
}
});
to_string_or_throw(cx, result)
}
/// Load a document
pub fn load(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let id = &cx.argument::<JsString>(0)?.value(&mut cx);
let content = cx.argument::<JsString>(1)?.value(&mut cx);
let format = not_empty_or_none(&cx.argument::<JsString>(2)?.value(&mut cx));
let result = RUNTIME.block_on(async {
match DOCUMENTS.get(id).await {
Ok(document) => document.lock().await.load(content, format).await,
Err(error) => Err(error),
}
});
to_undefined_or_throw(cx, result)
}
/// Subscribe to one or more of a document's topics
pub fn subscribe(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let id = &cx.argument::<JsString>(0)?.value(&mut cx);
let topic = &cx.argument::<JsString>(1)?.value(&mut cx);
let result = RUNTIME.block_on(async {
match DOCUMENTS.get(id).await {
Ok(document) => {
document.lock().await.subscribe(topic, CLIENT_ID);
Ok(())
}
Err(error) => Err(error),
}
});
to_undefined_or_throw(cx, result)
}
/// Unsubscribe from one or more of a document's topics
pub fn unsubscribe(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let id = &cx.argument::<JsString>(0)?.value(&mut cx);
let topic = &cx.argument::<JsString>(1)?.value(&mut cx);
let result = RUNTIME.block_on(async {
match DOCUMENTS.get(id).await {
Ok(document) => {
document.lock().await.unsubscribe(topic, CLIENT_ID);
Ok(())
}
Err(error) => Err(error),
}
});
to_undefined_or_throw(cx, result)
}
/// Close a document
pub fn close(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let id = &cx.argument::<JsString>(0)?.value(&mut cx);
let result = RUNTIME.block_on(async { DOCUMENTS.close(id).await });
to_undefined_or_throw(cx, result)
}
| 33.179487 | 86 | 0.60541 |
f94d2f77eb831262fa83a228c3d2e438883c4f33 | 13,023 | use std::os::raw::c_int;
use serde::{ser, Serialize};
use super::LuaSerdeExt;
use crate::error::{Error, Result};
use crate::ffi;
use crate::lua::Lua;
use crate::string::String;
use crate::table::Table;
use crate::types::Integer;
use crate::util::{check_stack, StackGuard};
use crate::value::{ToLua, Value};
/// A struct for serializing Rust values into Lua values.
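///
/// A usage sketch (assuming a live `Lua` state and a value whose type derives
/// `serde::Serialize`):
///
/// ```ignore
/// use serde::Serialize;
/// let value: Value = my_data.serialize(Serializer::new(&lua))?;
/// ```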
#[derive(Debug)]
pub struct Serializer<'lua> {
lua: &'lua Lua,
options: Options,
}
/// A struct with options to change default serializer behaviour.
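///
/// A sketch of overriding the defaults via the builder-style setters defined
/// below:
///
/// ```ignore
/// let options = Options::new()
///     .set_array_metatable(false)
///     .serialize_none_to_null(false);
/// ```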
#[derive(Debug, Clone, Copy)]
#[non_exhaustive]
pub struct Options {
    /// If true, serializing a sequence to a Lua table will create a table
    /// with the [`array_metatable`] attached.
///
/// Default: **true**
///
/// [`array_metatable`]: ../trait.LuaSerdeExt.html#tymethod.array_metatable
pub set_array_metatable: bool,
/// If true, serialize `None` (part of `Option` type) to [`null`].
/// Otherwise it will be set to Lua [`Nil`].
///
/// Default: **true**
///
/// [`null`]: ../trait.LuaSerdeExt.html#tymethod.null
/// [`Nil`]: ../../enum.Value.html#variant.Nil
pub serialize_none_to_null: bool,
/// If true, serialize `Unit` (type of `()` in Rust) and Unit structs to [`null`].
/// Otherwise it will be set to Lua [`Nil`].
///
/// Default: **true**
///
/// [`null`]: ../trait.LuaSerdeExt.html#tymethod.null
/// [`Nil`]: ../../enum.Value.html#variant.Nil
pub serialize_unit_to_null: bool,
}
impl Default for Options {
fn default() -> Self {
Options {
set_array_metatable: true,
serialize_none_to_null: true,
serialize_unit_to_null: true,
}
}
}
impl Options {
    /// Returns a new instance of `Options` with default parameters.
pub fn new() -> Self {
Self::default()
}
/// Sets [`set_array_metatable`] option.
///
/// [`set_array_metatable`]: #structfield.set_array_metatable
pub fn set_array_metatable(mut self, enabled: bool) -> Self {
self.set_array_metatable = enabled;
self
}
/// Sets [`serialize_none_to_null`] option.
///
/// [`serialize_none_to_null`]: #structfield.serialize_none_to_null
pub fn serialize_none_to_null(mut self, enabled: bool) -> Self {
self.serialize_none_to_null = enabled;
self
}
/// Sets [`serialize_unit_to_null`] option.
///
/// [`serialize_unit_to_null`]: #structfield.serialize_unit_to_null
pub fn serialize_unit_to_null(mut self, enabled: bool) -> Self {
self.serialize_unit_to_null = enabled;
self
}
}
impl<'lua> Serializer<'lua> {
/// Creates a new Lua Serializer with default options.
pub fn new(lua: &'lua Lua) -> Self {
Self::new_with_options(lua, Options::default())
}
/// Creates a new Lua Serializer with custom options.
pub fn new_with_options(lua: &'lua Lua, options: Options) -> Self {
Serializer { lua, options }
}
}
macro_rules! lua_serialize_number {
($name:ident, $t:ty) => {
#[inline]
fn $name(self, value: $t) -> Result<Value<'lua>> {
value.to_lua(self.lua)
}
};
}
impl<'lua> ser::Serializer for Serializer<'lua> {
type Ok = Value<'lua>;
type Error = Error;
// Associated types for keeping track of additional state while serializing
// compound data structures like sequences and maps.
type SerializeSeq = SerializeVec<'lua>;
type SerializeTuple = SerializeVec<'lua>;
type SerializeTupleStruct = SerializeVec<'lua>;
type SerializeTupleVariant = SerializeTupleVariant<'lua>;
type SerializeMap = SerializeMap<'lua>;
type SerializeStruct = SerializeMap<'lua>;
type SerializeStructVariant = SerializeStructVariant<'lua>;
#[inline]
fn serialize_bool(self, value: bool) -> Result<Value<'lua>> {
Ok(Value::Boolean(value))
}
lua_serialize_number!(serialize_i8, i8);
lua_serialize_number!(serialize_u8, u8);
lua_serialize_number!(serialize_i16, i16);
lua_serialize_number!(serialize_u16, u16);
lua_serialize_number!(serialize_i32, i32);
lua_serialize_number!(serialize_u32, u32);
lua_serialize_number!(serialize_i64, i64);
lua_serialize_number!(serialize_u64, u64);
lua_serialize_number!(serialize_f32, f32);
lua_serialize_number!(serialize_f64, f64);
#[inline]
fn serialize_char(self, value: char) -> Result<Value<'lua>> {
self.serialize_str(&value.to_string())
}
#[inline]
fn serialize_str(self, value: &str) -> Result<Value<'lua>> {
self.lua.create_string(value).map(Value::String)
}
#[inline]
fn serialize_bytes(self, value: &[u8]) -> Result<Value<'lua>> {
self.lua.create_string(value).map(Value::String)
}
#[inline]
fn serialize_none(self) -> Result<Value<'lua>> {
if self.options.serialize_none_to_null {
Ok(self.lua.null())
} else {
Ok(Value::Nil)
}
}
#[inline]
fn serialize_some<T>(self, value: &T) -> Result<Value<'lua>>
where
T: Serialize + ?Sized,
{
value.serialize(self)
}
#[inline]
fn serialize_unit(self) -> Result<Value<'lua>> {
if self.options.serialize_unit_to_null {
Ok(self.lua.null())
} else {
Ok(Value::Nil)
}
}
#[inline]
fn serialize_unit_struct(self, _name: &'static str) -> Result<Value<'lua>> {
if self.options.serialize_unit_to_null {
Ok(self.lua.null())
} else {
Ok(Value::Nil)
}
}
#[inline]
fn serialize_unit_variant(
self,
_name: &'static str,
_variant_index: u32,
variant: &'static str,
) -> Result<Value<'lua>> {
self.serialize_str(variant)
}
#[inline]
fn serialize_newtype_struct<T>(self, _name: &'static str, value: &T) -> Result<Value<'lua>>
where
T: Serialize + ?Sized,
{
value.serialize(self)
}
#[inline]
fn serialize_newtype_variant<T>(
self,
_name: &'static str,
_variant_index: u32,
variant: &'static str,
value: &T,
) -> Result<Value<'lua>>
where
T: Serialize + ?Sized,
{
let table = self.lua.create_table()?;
let variant = self.lua.create_string(variant)?;
let value = self.lua.to_value_with(value, self.options)?;
table.raw_set(variant, value)?;
Ok(Value::Table(table))
}
#[inline]
fn serialize_seq(self, len: Option<usize>) -> Result<Self::SerializeSeq> {
let len = len.unwrap_or(0) as c_int;
let table = self.lua.create_table_with_capacity(len, 0)?;
if self.options.set_array_metatable {
table.set_metatable(Some(self.lua.array_metatable()));
}
let options = self.options;
Ok(SerializeVec { table, options })
}
#[inline]
fn serialize_tuple(self, len: usize) -> Result<Self::SerializeTuple> {
self.serialize_seq(Some(len))
}
#[inline]
fn serialize_tuple_struct(
self,
_name: &'static str,
len: usize,
) -> Result<Self::SerializeTupleStruct> {
self.serialize_seq(Some(len))
}
#[inline]
fn serialize_tuple_variant(
self,
_name: &'static str,
_variant_index: u32,
variant: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleVariant> {
Ok(SerializeTupleVariant {
name: self.lua.create_string(variant)?,
table: self.lua.create_table()?,
options: self.options,
})
}
#[inline]
fn serialize_map(self, len: Option<usize>) -> Result<Self::SerializeMap> {
let len = len.unwrap_or(0) as c_int;
Ok(SerializeMap {
key: None,
table: self.lua.create_table_with_capacity(0, len)?,
options: self.options,
})
}
#[inline]
fn serialize_struct(self, _name: &'static str, len: usize) -> Result<Self::SerializeStruct> {
self.serialize_map(Some(len))
}
#[inline]
fn serialize_struct_variant(
self,
_name: &'static str,
_variant_index: u32,
variant: &'static str,
len: usize,
) -> Result<Self::SerializeStructVariant> {
Ok(SerializeStructVariant {
name: self.lua.create_string(variant)?,
table: self.lua.create_table_with_capacity(0, len as c_int)?,
options: self.options,
})
}
}
#[doc(hidden)]
pub struct SerializeVec<'lua> {
table: Table<'lua>,
options: Options,
}
impl<'lua> ser::SerializeSeq for SerializeVec<'lua> {
type Ok = Value<'lua>;
type Error = Error;
fn serialize_element<T>(&mut self, value: &T) -> Result<()>
where
T: Serialize + ?Sized,
{
let lua = self.table.0.lua;
let value = lua.to_value_with(value, self.options)?;
unsafe {
let _sg = StackGuard::new(lua.state);
check_stack(lua.state, 6)?;
lua.push_ref(&self.table.0);
lua.push_value(value)?;
let len = ffi::lua_rawlen(lua.state, -2) as Integer;
ffi::safe::lua_rawseti(lua.state, -2, len + 1)
}
}
fn end(self) -> Result<Value<'lua>> {
Ok(Value::Table(self.table))
}
}
impl<'lua> ser::SerializeTuple for SerializeVec<'lua> {
type Ok = Value<'lua>;
type Error = Error;
fn serialize_element<T>(&mut self, value: &T) -> Result<()>
where
T: Serialize + ?Sized,
{
ser::SerializeSeq::serialize_element(self, value)
}
fn end(self) -> Result<Value<'lua>> {
ser::SerializeSeq::end(self)
}
}
impl<'lua> ser::SerializeTupleStruct for SerializeVec<'lua> {
type Ok = Value<'lua>;
type Error = Error;
fn serialize_field<T>(&mut self, value: &T) -> Result<()>
where
T: Serialize + ?Sized,
{
ser::SerializeSeq::serialize_element(self, value)
}
fn end(self) -> Result<Value<'lua>> {
ser::SerializeSeq::end(self)
}
}
#[doc(hidden)]
pub struct SerializeTupleVariant<'lua> {
name: String<'lua>,
table: Table<'lua>,
options: Options,
}
impl<'lua> ser::SerializeTupleVariant for SerializeTupleVariant<'lua> {
type Ok = Value<'lua>;
type Error = Error;
fn serialize_field<T>(&mut self, value: &T) -> Result<()>
where
T: Serialize + ?Sized,
{
let lua = self.table.0.lua;
let idx = self.table.raw_len() + 1;
self.table
.raw_insert(idx, lua.to_value_with(value, self.options)?)
}
fn end(self) -> Result<Value<'lua>> {
let lua = self.table.0.lua;
let table = lua.create_table()?;
table.raw_set(self.name, self.table)?;
Ok(Value::Table(table))
}
}
#[doc(hidden)]
pub struct SerializeMap<'lua> {
table: Table<'lua>,
key: Option<Value<'lua>>,
options: Options,
}
impl<'lua> ser::SerializeMap for SerializeMap<'lua> {
type Ok = Value<'lua>;
type Error = Error;
fn serialize_key<T>(&mut self, key: &T) -> Result<()>
where
T: Serialize + ?Sized,
{
let lua = self.table.0.lua;
self.key = Some(lua.to_value_with(key, self.options)?);
Ok(())
}
fn serialize_value<T>(&mut self, value: &T) -> Result<()>
where
T: Serialize + ?Sized,
{
let lua = self.table.0.lua;
let key = mlua_expect!(
self.key.take(),
"serialize_value called before serialize_key"
);
let value = lua.to_value_with(value, self.options)?;
self.table.raw_set(key, value)
}
fn end(self) -> Result<Value<'lua>> {
Ok(Value::Table(self.table))
}
}
impl<'lua> ser::SerializeStruct for SerializeMap<'lua> {
type Ok = Value<'lua>;
type Error = Error;
fn serialize_field<T>(&mut self, key: &'static str, value: &T) -> Result<()>
where
T: Serialize + ?Sized,
{
ser::SerializeMap::serialize_key(self, key)?;
ser::SerializeMap::serialize_value(self, value)
}
fn end(self) -> Result<Value<'lua>> {
ser::SerializeMap::end(self)
}
}
#[doc(hidden)]
pub struct SerializeStructVariant<'lua> {
name: String<'lua>,
table: Table<'lua>,
options: Options,
}
impl<'lua> ser::SerializeStructVariant for SerializeStructVariant<'lua> {
type Ok = Value<'lua>;
type Error = Error;
fn serialize_field<T>(&mut self, key: &'static str, value: &T) -> Result<()>
where
T: Serialize + ?Sized,
{
let lua = self.table.0.lua;
self.table
.raw_set(key, lua.to_value_with(value, self.options)?)?;
Ok(())
}
fn end(self) -> Result<Value<'lua>> {
let lua = self.table.0.lua;
let table = lua.create_table()?;
table.raw_set(self.name, self.table)?;
Ok(Value::Table(table))
}
}
| 27.187891 | 97 | 0.590878 |
5d0faef2a2409a457537e2ecc30ba7946986ba75 | 471 | use std::error::Error;
use std::fs::File;
use std::io::{Read, Seek, SeekFrom, Write};
pub fn read(file: &mut File) -> Result<f64, Box<dyn Error>> {
let mut content = String::new();
file.read_to_string(&mut content)?;
file.seek(SeekFrom::Start(0))?;
Ok(content.trim().parse()?)
}
pub fn write(file: &mut File, value: f64) -> Result<(), Box<dyn Error>> {
file.write_all(value.to_string().as_bytes())?;
file.seek(SeekFrom::Start(0))?;
Ok(())
}
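// A usage sketch (hypothetical path). Both helpers rewind to the start of the
// file after each call, so repeated reads and writes operate on the same value.
// Note that `write` does not truncate, so writing a shorter representation after
// a longer one would leave stale trailing bytes:
//
//     let mut f = File::options().read(true).write(true).open("/tmp/value")?;
//     write(&mut f, 1.5)?;
//     assert_eq!(read(&mut f)?, 1.5);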
| 27.705882 | 73 | 0.619958 |
1e3118095b63b5ae75c1db8f3a2bd083f9c7c2a3 | 1,337 | use std::collections::VecDeque;
/// Represents a position within the history.
/// Smaller numbers are assumed to be before larger numbers,
/// and the indices are assumed to be contiguous.
pub type HistoryIndex = usize;
/// Defines the history interface for the line editor.
pub trait History {
/// Lookup the line corresponding to an index.
fn get(&self, idx: HistoryIndex) -> Option<&str>;
/// Return the index for the most recently added entry.
fn last(&self) -> Option<HistoryIndex>;
/// Add an entry.
/// Note that the LineEditor will not automatically call
/// the add method.
fn add(&mut self, line: &str);
}
/// A simple history implementation that holds entries in memory.
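///
/// A usage sketch (indices are contiguous and start at 0; consecutive
/// duplicates are ignored by `add`):
///
/// ```ignore
/// let mut history = BasicHistory::default();
/// history.add("ls");
/// history.add("ls"); // consecutive duplicate, ignored
/// history.add("cd /tmp");
/// assert_eq!(history.last(), Some(1));
/// assert_eq!(history.get(1), Some("cd /tmp"));
/// ```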
#[derive(Default)]
pub struct BasicHistory {
entries: VecDeque<String>,
}
impl History for BasicHistory {
fn get(&self, idx: HistoryIndex) -> Option<&str> {
self.entries.get(idx).map(String::as_str)
}
fn last(&self) -> Option<HistoryIndex> {
if self.entries.is_empty() {
None
} else {
Some(self.entries.len() - 1)
}
}
fn add(&mut self, line: &str) {
if self.entries.back().map(String::as_str) == Some(line) {
// Ignore duplicates
return;
}
self.entries.push_back(line.to_owned());
}
}
| 28.446809 | 66 | 0.62528 |
26acee2ca4cc6f297a90a056a99ba6c8c1c43a5a | 5,352 | #[macro_use]
extern crate criterion;
use criterion::{BatchSize, Criterion, ParameterizedBenchmark};
use kvs::{KvStore, KvsEngine, SledKvsEngine};
use rand::prelude::*;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use std::iter;
use tempfile::TempDir;
const NUM_SET_OP_PER_ITER: u16 = 100;
const NUM_GET_OP_PER_ITER: u16 = 1000;
const MAX_STR_LEN: usize = 100_000;
fn set_bench(c: &mut Criterion) {
let bench = ParameterizedBenchmark::new(
"sled",
|b, _| {
b.iter_batched(
|| {
let temp_dir = TempDir::new().expect("unable to create temp dir");
let store = SledKvsEngine::open(temp_dir.path()).unwrap();
let mut rng: StdRng = SeedableRng::from_seed([1u8; 32]);
let mut dataset = Vec::new();
for _ in 0..NUM_SET_OP_PER_ITER {
dataset.push(gen_rand_string_pair(rng.gen()));
}
(store, dataset)
},
|(store, dataset)| {
for (key, value) in dataset {
store.set(key, value).unwrap();
}
},
BatchSize::SmallInput,
)
},
iter::once(()),
)
.with_function("kvs", |b, _| {
b.iter_batched(
|| {
let temp_dir = TempDir::new().expect("unable to create temp dir");
let store = KvStore::open(temp_dir.path()).unwrap();
let mut rng: StdRng = SeedableRng::from_seed([1u8; 32]);
let mut dataset = Vec::new();
for _ in 0..NUM_SET_OP_PER_ITER {
dataset.push(gen_rand_string_pair(rng.gen()));
}
(store, dataset, temp_dir)
},
|(store, dataset, _temp_dir)| {
for (key, value) in dataset {
store.set(key, value).unwrap();
}
},
BatchSize::SmallInput,
)
})
.measurement_time(std::time::Duration::from_millis(500));
c.bench("set_bench", bench);
}
fn get_bench(c: &mut Criterion) {
let bench = ParameterizedBenchmark::new(
"kvs",
|b, _| {
b.iter_batched(
|| {
let temp_dir = TempDir::new().expect("unable to create temp dir");
let store = KvStore::open(temp_dir.path()).unwrap();
let mut rng: StdRng = SeedableRng::from_seed([1u8; 32]);
let mut key_set = Vec::new();
for _ in 0..NUM_GET_OP_PER_ITER / 10 {
let (key, value) = gen_rand_string_pair(rng.gen());
store.set(key.to_owned(), value.to_owned()).unwrap();
key_set.push(key);
}
let choose_rng = rand::thread_rng();
(store, key_set, choose_rng, temp_dir)
},
|(store, key_set, mut rng, _temp_dir)| {
for _ in 0..NUM_GET_OP_PER_ITER {
store
.get(key_set.choose(&mut rng).unwrap().to_owned())
.unwrap();
}
},
BatchSize::SmallInput,
)
},
iter::once(()),
)
.with_function("sled", |b, _| {
b.iter_batched(
|| {
let temp_dir = TempDir::new().expect("unable to create temp dir");
let store = SledKvsEngine::open(temp_dir.path()).unwrap();
let mut rng: StdRng = SeedableRng::from_seed([1u8; 32]);
let mut key_set = Vec::new();
for _ in 0..NUM_GET_OP_PER_ITER / 10 {
let (key, value) = gen_rand_string_pair(rng.gen());
store.set(key.to_owned(), value.to_owned()).unwrap();
key_set.push(key);
}
let choose_rng = rand::thread_rng();
(store, key_set, choose_rng)
},
|(store, key_set, mut rng)| {
for _ in 0..NUM_GET_OP_PER_ITER {
store
.get(key_set.choose(&mut rng).unwrap().to_owned())
.unwrap();
}
},
BatchSize::SmallInput,
)
})
.measurement_time(std::time::Duration::from_millis(500));
c.bench("get_bench", bench);
}
/* generate a random (key, value) pair of strings, each with a random length
between 1 and MAX_STR_LEN */
fn gen_rand_string_pair(seed: u8) -> (String, String) {
    /* gen key: deterministic, derived from the seed */
    let mut rng: StdRng = SeedableRng::from_seed([seed; 32]);
    let size = rng.gen_range(1, MAX_STR_LEN);
    // zero-initialize so the slice has `size` elements for `fill` to
    // overwrite; `Vec::with_capacity` alone would yield an empty slice
    let mut v1 = vec![0u8; size];
    rng.fill(&mut v1[..]);
    /* gen value: non-deterministic */
    drop(rng);
    let mut rng = rand::thread_rng();
    let size = rng.gen_range(1, MAX_STR_LEN);
    let mut v2 = vec![0u8; size];
    rng.fill(&mut v2[..]);
    (
        String::from_utf8_lossy(&v1).to_string(),
        String::from_utf8_lossy(&v2).to_string(),
    )
}
criterion_group!(benches, set_bench, get_bench);
// criterion_group!(benches, get_bench);
// criterion_group!(benches, set_bench);
criterion_main!(benches);
| 36.162162 | 86 | 0.490284 |
22891bc2bea7855d05b5250562d371070fb46b42 | 15,300 | use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::sync::Arc;
use datafusion::arrow::array::{ArrayRef, BooleanArray, PrimitiveArray, StringArray};
use datafusion::arrow::datatypes::{DataType, Field, Schema};
use datafusion::arrow::datatypes::{Float64Type, Int64Type};
use datafusion::arrow::record_batch::RecordBatch;
use regex::Regex;
use reqwest::Client;
use serde_derive::Deserialize;
use uriparse::URIReference;
use crate::error::ColumnQError;
use crate::table::{TableOptionGoogleSpreasheet, TableSource};
// steps
// * Activate the Google Sheets API in the Google API Console.
//
// * Create service account: https://console.developers.google.com/apis/api/sheets.googleapis.com/credentials?project=roapi-302505
// * create key and save the json format somewhere safe
// * Share spreadsheet with service account
#[derive(Deserialize, Debug)]
struct SheetProperties {
#[serde(rename = "sheetId")]
sheet_id: usize,
title: String,
index: usize,
// other unused attributes:
//
// "sheetType": "GRID",
// "gridProperties": {
// "rowCount": 1000,
// "columnCount": 28
// }
//
// see: https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/sheets#SheetProperties
}
#[derive(Deserialize, Debug)]
struct Sheet {
properties: SheetProperties,
// for all available fields, see:
// https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet
}
#[derive(Deserialize, Debug)]
struct Spreadsheets {
sheets: Vec<Sheet>,
// other unused attributes:
// * spreadsheetId
// * properties
// * spreadsheetUrl
//
// see: https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets
}
#[allow(dead_code)]
#[derive(Deserialize, Debug)]
struct SpreadsheetValues {
range: String,
#[serde(rename = "majorDimension")]
major_dimension: String,
values: Vec<Vec<String>>,
}
// TODO: should we support optional column?
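// e.g. "42" -> Int64, "3.14" -> Float64, "true"/"FALSE" -> Boolean, and
// anything else (such as "10/23/2020" or "$2,000") falls back to Utf8.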
fn infer_value_type(v: &str) -> DataType {
// match order matters
match v {
// TODO: support Date64 and Time64
_ if v.parse::<i64>().is_ok() => DataType::Int64,
_ if v.parse::<f64>().is_ok() => DataType::Float64,
_ => match v.to_lowercase().as_str() {
"false" | "true" => DataType::Boolean,
_ => DataType::Utf8,
},
}
}
// util wrapper for calling google spreadsheet API
async fn gs_api_get(token: &str, url: &str) -> Result<reqwest::Response, ColumnQError> {
Client::builder()
.build()
.map_err(|e| {
ColumnQError::GoogleSpeadsheets(format!(
"Failed to initialize HTTP client: {}",
e.to_string()
))
})?
.get(url)
.bearer_auth(token)
.send()
.await
.map_err(|e| {
ColumnQError::GoogleSpeadsheets(format!(
"Failed to send API request: {}",
e.to_string()
))
})
}
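// Coerce two observed cell types into a common column type: e.g. Int64 mixed
// with Float64 widens to Float64, while any other mix falls back to Utf8.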
fn coerce_type(l: DataType, r: DataType) -> DataType {
match (l, r) {
(DataType::Boolean, DataType::Boolean) => DataType::Boolean,
(DataType::Date32, DataType::Date32) => DataType::Date32,
(DataType::Date64, DataType::Date64)
| (DataType::Date64, DataType::Date32)
| (DataType::Date32, DataType::Date64) => DataType::Date64,
(DataType::Int64, DataType::Int64) => DataType::Int64,
(DataType::Float64, DataType::Float64)
| (DataType::Float64, DataType::Int64)
| (DataType::Int64, DataType::Float64) => DataType::Float64,
_ => DataType::Utf8,
}
}
fn infer_schema(rows: &[Vec<String>]) -> Schema {
let mut col_types: HashMap<&str, HashSet<DataType>> = HashMap::new();
let col_names = &rows[0];
rows.iter().skip(1).for_each(|row| {
row.iter().enumerate().for_each(|(i, col_val)| {
let col_name = &col_names[i];
let col_type = infer_value_type(col_val);
let entry = col_types.entry(col_name).or_insert_with(HashSet::new);
entry.insert(col_type);
});
});
let fields: Vec<Field> = col_names
.iter()
.map(|col_name| {
let set = col_types.entry(col_name).or_insert_with(|| {
// TODO: this should never happen, maybe we should use panic instead?
let mut set = HashSet::new();
set.insert(DataType::Utf8);
set
});
let mut dt_iter = set.iter().cloned();
let dt_init = dt_iter.next().unwrap_or(DataType::Utf8);
let dt = dt_iter.fold(dt_init, coerce_type);
            // normalize the column name by replacing spaces with underscores
Field::new(&col_name.replace(" ", "_"), dt, false)
})
.collect();
Schema::new(fields)
}
fn parse_boolean(s: &str) -> bool {
s.eq_ignore_ascii_case("true")
}
fn sheet_values_to_record_batch(values: &[Vec<String>]) -> Result<RecordBatch, ColumnQError> {
let schema = infer_schema(values);
let arrays = schema
.fields()
.iter()
.enumerate()
.map(|(i, field)| {
// skip header row
let rows_iter = values.iter().skip(1);
Ok(match field.data_type() {
DataType::Boolean => Arc::new(
rows_iter
.map(|row| Some(parse_boolean(&row[i])))
.collect::<BooleanArray>(),
) as ArrayRef,
DataType::Int64 => Arc::new(
rows_iter
.map(|row| {
Ok(Some(row[i].parse::<i64>().map_err(|_| {
ColumnQError::GoogleSpeadsheets(format!(
"Expect int64 value, got: {}",
row[i]
))
})?))
})
.collect::<Result<PrimitiveArray<Int64Type>, ColumnQError>>()?,
) as ArrayRef,
DataType::Float64 => Arc::new(
rows_iter
.map(|row| {
Ok(Some(row[i].parse::<f64>().map_err(|_| {
ColumnQError::GoogleSpeadsheets(format!(
"Expect float64 value, got: {}",
row[i]
))
})?))
})
.collect::<Result<PrimitiveArray<Float64Type>, ColumnQError>>()?,
) as ArrayRef,
_ => Arc::new(rows_iter.map(|row| Some(&row[i])).collect::<StringArray>())
as ArrayRef,
})
})
.collect::<Result<Vec<ArrayRef>, ColumnQError>>()?;
Ok(RecordBatch::try_new(Arc::new(schema), arrays)?)
}
async fn fetch_auth_token(
opt: &TableOptionGoogleSpreasheet,
) -> Result<yup_oauth2::AccessToken, ColumnQError> {
    // Read application creds from a file. The client secret file contains JSON
    // like `{"installed":{"client_id": ... }}`
let creds = yup_oauth2::read_service_account_key(&opt.application_secret_path)
.await
.map_err(|e| {
ColumnQError::GoogleSpeadsheets(format!(
"Error reading application secret from disk: {}",
e.to_string()
))
})?;
let sa = yup_oauth2::ServiceAccountAuthenticator::builder(creds)
.build()
.await
.map_err(|e| {
ColumnQError::GoogleSpeadsheets(format!(
"Error building service account authenticator: {}",
e.to_string()
))
})?;
let scopes = &["https://www.googleapis.com/auth/spreadsheets.readonly"];
sa.token(scopes).await.map_err(|e| {
ColumnQError::GoogleSpeadsheets(format!("Failed to obtain OAuth2 token: {}", e.to_string()))
})
}
async fn resolve_sheet_title<'a, 'b, 'c, 'd>(
token: &'a str,
spreadsheet_id: &'b str,
uri: &'c URIReference<'d>,
) -> Result<String, ColumnQError> {
// look up sheet title by sheet id through API
let resp = gs_api_get(
token,
&format!(
"https://sheets.googleapis.com/v4/spreadsheets/{}",
spreadsheet_id
),
)
.await?
.error_for_status()
.map_err(|e| {
ColumnQError::GoogleSpeadsheets(format!(
"Failed to resolve sheet title from API: {}",
e.to_string()
))
})?;
let spreadsheets = resp.json::<Spreadsheets>().await.map_err(|e| {
ColumnQError::GoogleSpeadsheets(format!("Failed to parse API response: {}", e.to_string()))
})?;
    // when the sheet id is not specified in the config, try to parse it from the URI
let sheet_id: Option<usize> = match uri.fragment() {
        // if a sheet id is specified within the URI in the format `#gid=x`,
        // e.g. `#gid=123` selects the sheet whose sheetId is 123
Some(fragment) => {
let s = fragment.as_str();
let parts: Vec<&str> = s.split('=').collect();
match parts.len() {
2 => match parts[0] {
"gid" => parts[1].parse().ok(),
_ => None,
},
_ => None,
}
}
None => None,
};
let sheet = match sheet_id {
Some(id) => spreadsheets
.sheets
.iter()
.find(|s| s.properties.sheet_id == id)
.ok_or_else(|| ColumnQError::GoogleSpeadsheets(format!("Invalid sheet id {}", id)))?,
// no sheet id specified, default to the first sheet
None => spreadsheets
.sheets
.iter()
.find(|s| s.properties.index == 0)
.ok_or_else(|| ColumnQError::GoogleSpeadsheets("spreadsheets is empty".to_string()))?,
};
Ok(sheet.properties.title.clone())
}
pub async fn to_mem_table(
t: &TableSource,
) -> Result<datafusion::datasource::MemTable, ColumnQError> {
lazy_static! {
static ref RE_GOOGLE_SHEET: Regex =
Regex::new(r"https://docs.google.com/spreadsheets/d/(.+)").unwrap();
}
let uri_str = t.get_uri_str();
if RE_GOOGLE_SHEET.captures(uri_str).is_none() {
return Err(ColumnQError::InvalidUri(uri_str.to_string()));
}
let uri = URIReference::try_from(uri_str)?;
let spreadsheet_id = uri.path().segments()[2].as_str();
let opt = t
.option
.as_ref()
.ok_or(ColumnQError::MissingOption)?
.as_google_spreadsheet()?;
let token = fetch_auth_token(opt).await?;
let token_str = token.as_str();
let sheet_title = match &opt.sheet_title {
Some(t) => t.clone(),
None => resolve_sheet_title(token_str, spreadsheet_id, &uri).await?,
};
let resp = gs_api_get(
token_str,
&format!(
"https://sheets.googleapis.com/v4/spreadsheets/{}/values/{}",
spreadsheet_id, sheet_title,
),
)
.await?
.error_for_status()
.map_err(|e| {
ColumnQError::GoogleSpeadsheets(format!(
"Failed to load sheet value from API: {}",
e.to_string()
))
})?;
let sheet = resp.json::<SpreadsheetValues>().await.map_err(|e| {
ColumnQError::GoogleSpeadsheets(format!("Failed to parse API response: {}", e.to_string()))
})?;
let batch = sheet_values_to_record_batch(&sheet.values)?;
let schema_ref = batch.schema();
let partitions = vec![vec![batch]];
Ok(datafusion::datasource::MemTable::try_new(
schema_ref, partitions,
)?)
}
#[cfg(test)]
mod tests {
use super::*;
use datafusion::arrow::array::{BooleanArray, Int64Array};
fn row(raw: &[&str]) -> Vec<String> {
raw.iter().map(|s| s.to_string()).collect()
}
fn property_sheet() -> SpreadsheetValues {
SpreadsheetValues {
range: "Properties!A1:AB1000".to_string(),
major_dimension: "ROWS".to_string(),
values: vec![
row(&[
"Address",
"Image",
"Landlord",
"Bed",
"Bath",
"Occupied",
"Monthly Rent",
"Lease Expiration Date",
"Days Until Expiration",
]),
row(&[
"Bothell, WA",
"https://a.com/1.jpeg",
"Roger",
"3",
"2",
"FALSE",
"$2,000",
"10/23/2020",
"Expired",
]),
row(&[
"Mill Creek, WA",
"https://a.com/2.jpeg",
"Sam",
"3",
"3",
"TRUE",
"$3,500",
"8/4/2021",
"193",
]),
row(&[
"Fremont, WA",
"",
"Daniel",
"5",
"3",
"FALSE",
"$4,500",
"7/13/2019",
"Expired",
]),
row(&[
"Shoreline, WA",
"https://a.com/3.jpeg",
"Roger",
"1",
"1",
"TRUE",
"$1,200",
"12/9/2021",
"320",
]),
],
}
}
#[test]
fn schema_inference() {
let sheet = property_sheet();
let schema = infer_schema(&sheet.values);
assert_eq!(
schema,
Schema::new(vec![
Field::new("Address", DataType::Utf8, false),
Field::new("Image", DataType::Utf8, false),
Field::new("Landlord", DataType::Utf8, false),
Field::new("Bed", DataType::Int64, false),
Field::new("Bath", DataType::Int64, false),
Field::new("Occupied", DataType::Boolean, false),
Field::new("Monthly_Rent", DataType::Utf8, false),
Field::new("Lease_Expiration_Date", DataType::Utf8, false),
Field::new("Days_Until_Expiration", DataType::Utf8, false),
])
);
}
#[test]
fn sheetvalue_to_record_batch() -> anyhow::Result<()> {
let sheet = property_sheet();
let batch = sheet_values_to_record_batch(&sheet.values)?;
assert_eq!(batch.num_columns(), 9);
assert_eq!(
batch.column(3).as_ref(),
Arc::new(Int64Array::from(vec![3, 3, 5, 1])).as_ref(),
);
assert_eq!(
batch.column(5).as_ref(),
Arc::new(BooleanArray::from(vec![false, true, false, true])).as_ref(),
);
assert_eq!(
batch.column(2).as_ref(),
Arc::new(StringArray::from(vec!["Roger", "Sam", "Daniel", "Roger"])).as_ref(),
);
Ok(())
}
}
| 32.142857 | 130 | 0.50915 |
383143a09ac440f71c3e4daa5cee197318bf6a20 | 23,933 | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
aws,
cluster::Cluster,
cluster_swarm::{
cluster_swarm_kube::{ClusterSwarmKube, KubeNode},
ClusterSwarm,
},
genesis_helper::GenesisHelper,
instance::{
fullnode_pod_name, lsr_pod_name, validator_pod_name, vault_pod_name,
ApplicationConfig::{Fullnode, Validator, Vault, LSR},
FullnodeConfig, Instance, InstanceConfig, LSRConfig, ValidatorConfig, ValidatorGroup,
VaultConfig,
},
};
use anyhow::{format_err, Result};
use diem_logger::info;
use futures::future::try_join_all;
use std::{fs::File, io::Write, path::Path};
use structopt::StructOpt;
use consensus_types::safety_data::SafetyData;
use diem_genesis_tool::layout::Layout;
use diem_global_constants::{
CONSENSUS_KEY, DIEM_ROOT_KEY, EXECUTION_KEY, FULLNODE_NETWORK_KEY, GENESIS_WAYPOINT,
OPERATOR_KEY, OWNER_KEY, SAFETY_DATA, TREASURY_COMPLIANCE_KEY, VALIDATOR_NETWORK_ADDRESS_KEYS,
VALIDATOR_NETWORK_KEY, WAYPOINT,
};
use diem_secure_storage::{CryptoStorage, KVStorage, Namespaced, Storage, VaultStorage};
use diem_types::{chain_id::ChainId, network_address::NetworkAddress, waypoint::Waypoint};
use std::str::FromStr;
const VAULT_TOKEN: &str = "root";
const VAULT_PORT: u32 = 8200;
const DIEM_ROOT_NS: &str = "val-0";
const VAULT_BACKEND: &str = "vault";
const GENESIS_PATH: &str = "/tmp/genesis.blob";
#[derive(Clone, StructOpt, Debug)]
pub struct ClusterBuilderParams {
#[structopt(long, default_value = "1")]
pub fullnodes_per_validator: u32,
#[structopt(long, parse(try_from_str), default_value = "30")]
pub num_validators: u32,
#[structopt(long)]
pub enable_lsr: Option<bool>,
#[structopt(
long,
help = "Backend used by lsr. Possible Values are in-memory, on-disk, vault",
default_value = "vault"
)]
pub lsr_backend: String,
#[structopt(
long,
help = "Directory containing Move module bytecodes to be published in genesis"
)]
pub move_modules_dir: Option<String>,
}
impl ClusterBuilderParams {
pub fn enable_lsr(&self) -> bool {
self.enable_lsr.unwrap_or(true)
}
}
pub struct ClusterBuilder {
pub current_tag: String,
pub cluster_swarm: ClusterSwarmKube,
}
impl ClusterBuilder {
pub fn new(current_tag: String, cluster_swarm: ClusterSwarmKube) -> Self {
Self {
current_tag,
cluster_swarm,
}
}
pub async fn setup_cluster(
&self,
params: &ClusterBuilderParams,
clean_data: bool,
) -> Result<Cluster> {
self.cluster_swarm
.cleanup()
.await
.map_err(|e| format_err!("cleanup on startup failed: {}", e))?;
let current_tag = &self.current_tag;
info!(
"Deploying with {} tag for validators and fullnodes",
current_tag
);
let asg_name = format!(
"{}-k8s-testnet-validators",
self.cluster_swarm
.get_workspace()
.await
.expect("Failed to get workspace")
);
let mut instance_count =
params.num_validators + (params.fullnodes_per_validator * params.num_validators);
if params.enable_lsr() {
if params.lsr_backend == "vault" {
instance_count += params.num_validators * 2;
} else {
instance_count += params.num_validators;
}
}
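        // e.g. num_validators = 30 with fullnodes_per_validator = 1 and the
        // vault LSR backend yields 30 + 30 + 30 * 2 = 120 instances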
if clean_data {
// First scale down to zero instances and wait for it to complete so that we don't schedule pods on
// instances which are going into termination state
aws::set_asg_size(0, 0.0, &asg_name, true, true)
.await
.map_err(|err| format_err!("{} scale down failed: {}", asg_name, err))?;
// Then scale up and bring up new instances
aws::set_asg_size(instance_count as i64, 5.0, &asg_name, true, false)
.await
.map_err(|err| format_err!("{} scale up failed: {}", asg_name, err))?;
}
let modules_dir = if let Some(modules_dir) = ¶ms.move_modules_dir {
modules_dir.clone()
} else {
// No modules specified on command line. Create a tmpdir and populate it with the Diem genesis modules
let mut tempdir = diem_temppath::TempPath::new();
tempdir.create_as_dir()?;
tempdir.persist();
for b in diem_framework_releases::current_module_blobs() {
let mut temppath =
diem_temppath::TempPath::new_with_temp_dir(tempdir.path().to_path_buf());
temppath.create_as_file()?;
temppath.persist(); // otherwise, file will disappear when temppath goes out of scope
let mut file = File::create(temppath.path())?;
file.write_all(b)?;
file.sync_all()?;
}
tempdir.path().to_str().unwrap().to_string()
};
let (validators, lsrs, vaults, fullnodes, waypoint) = self
.spawn_validator_and_fullnode_set(
params.num_validators,
params.fullnodes_per_validator,
params.enable_lsr(),
¶ms.lsr_backend,
current_tag,
&modules_dir,
clean_data,
)
.await
.map_err(|e| format_err!("Failed to spawn_validator_and_fullnode_set: {}", e))?;
let cluster = Cluster::new(validators, fullnodes, lsrs, vaults, waypoint);
info!(
"Deployed {} validators and {} fns",
cluster.validator_instances().len(),
cluster.fullnode_instances().len(),
);
Ok(cluster)
}
/// Creates a set of validators and fullnodes with the given parameters
pub async fn spawn_validator_and_fullnode_set(
&self,
num_validators: u32,
num_fullnodes_per_validator: u32,
enable_lsr: bool,
lsr_backend: &str,
image_tag: &str,
move_modules_dir: &str,
clean_data: bool,
) -> Result<(
Vec<Instance>,
Vec<Instance>,
Vec<Instance>,
Vec<Instance>,
Option<Waypoint>,
)> {
let vault_nodes;
let mut lsr_nodes = vec![];
let mut vaults = vec![];
let mut lsrs = vec![];
let mut waypoint = None;
if enable_lsr {
if lsr_backend == "vault" {
vault_nodes = try_join_all((0..num_validators).map(|i| async move {
let pod_name = vault_pod_name(i);
self.cluster_swarm.allocate_node(&pod_name).await
}))
.await?;
let mut vault_instances: Vec<_> = vault_nodes
.iter()
.enumerate()
.map(|(i, node)| async move {
let vault_config = VaultConfig {};
if clean_data {
self.cluster_swarm.clean_data(&node.name).await?;
}
self.cluster_swarm
.spawn_new_instance(InstanceConfig {
validator_group: ValidatorGroup::new_for_index(i as u32),
application_config: Vault(vault_config),
})
.await
})
.collect();
vaults.append(&mut vault_instances);
} else {
vault_nodes = vec![];
}
lsr_nodes = try_join_all((0..num_validators).map(|i| async move {
let pod_name = lsr_pod_name(i);
self.cluster_swarm.allocate_node(&pod_name).await
}))
.await?;
let mut lsr_instances: Vec<_> = lsr_nodes
.iter()
.enumerate()
.map(|(i, node)| {
let vault_nodes = &vault_nodes;
async move {
let vault_addr = if enable_lsr && lsr_backend == "vault" {
Some(vault_nodes[i].internal_ip.clone())
} else {
None
};
let vault_namespace = if enable_lsr && lsr_backend == "vault" {
Some(validator_pod_name(i as u32))
} else {
None
};
let lsr_config = LSRConfig {
image_tag: image_tag.to_string(),
lsr_backend: lsr_backend.to_string(),
vault_addr,
vault_namespace,
};
if clean_data {
self.cluster_swarm.clean_data(&node.name).await?;
}
self.cluster_swarm
.spawn_new_instance(InstanceConfig {
validator_group: ValidatorGroup::new_for_index(i as u32),
application_config: LSR(lsr_config),
})
.await
}
})
.collect();
lsrs.append(&mut lsr_instances);
} else {
vault_nodes = vec![];
}
let lsrs = try_join_all(lsrs).await?;
let vaults = try_join_all(vaults).await?;
let validator_nodes = try_join_all((0..num_validators).map(|i| async move {
let pod_name = validator_pod_name(i);
self.cluster_swarm.allocate_node(&pod_name).await
}))
.await?;
let fullnode_nodes = try_join_all((0..num_validators).flat_map(move |validator_index| {
(0..num_fullnodes_per_validator).map(move |fullnode_index| async move {
let pod_name = fullnode_pod_name(validator_index, fullnode_index);
self.cluster_swarm.allocate_node(&pod_name).await
})
}))
.await?;
if !vault_nodes.is_empty() {
info!("Generating genesis with management tool.");
try_join_all(vault_nodes.iter().enumerate().map(|(i, node)| async move {
diem_retrier::retry_async(diem_retrier::fixed_retry_strategy(5000, 15), || {
Box::pin(async move { self.initialize_vault(i as u32, node).await })
})
.await
}))
.await?;
waypoint = Some(
self.generate_genesis(
num_validators,
&vault_nodes,
&validator_nodes,
&fullnode_nodes,
move_modules_dir,
)
.await?,
);
info!("Done generating genesis.");
}
let validators = (0..num_validators).map(|i| {
let validator_nodes = &validator_nodes;
let lsr_nodes = &lsr_nodes;
let vault_nodes = &vault_nodes;
async move {
let vault_addr = if enable_lsr && lsr_backend == "vault" {
Some(vault_nodes[i as usize].internal_ip.clone())
} else {
None
};
let vault_namespace = if enable_lsr && lsr_backend == "vault" {
Some(validator_pod_name(i))
} else {
None
};
let safety_rules_addr = if enable_lsr {
Some(lsr_nodes[i as usize].internal_ip.clone())
} else {
None
};
let validator_config = ValidatorConfig {
enable_lsr,
image_tag: image_tag.to_string(),
safety_rules_addr,
vault_addr,
vault_namespace,
};
if clean_data {
self.cluster_swarm
.clean_data(&validator_nodes[i as usize].name)
.await?;
}
self.cluster_swarm
.spawn_new_instance(InstanceConfig {
validator_group: ValidatorGroup::new_for_index(i),
application_config: Validator(validator_config),
})
.await
}
});
let fullnodes = (0..num_validators).flat_map(|validator_index| {
let fullnode_nodes = &fullnode_nodes;
let validator_nodes = &validator_nodes;
let vault_nodes = &vault_nodes;
(0..num_fullnodes_per_validator).map(move |fullnode_index| async move {
let vault_addr = if enable_lsr && lsr_backend == "vault" {
Some(vault_nodes[validator_index as usize].internal_ip.clone())
} else {
None
};
let vault_namespace = if enable_lsr && lsr_backend == "vault" {
Some(validator_pod_name(validator_index))
} else {
None
};
let seed_peer_ip = validator_nodes[validator_index as usize]
.internal_ip
.clone();
let fullnode_config = FullnodeConfig {
fullnode_index,
image_tag: image_tag.to_string(),
seed_peer_ip,
vault_addr,
vault_namespace,
};
if clean_data {
self.cluster_swarm
.clean_data(
&fullnode_nodes[(validator_index * num_fullnodes_per_validator
+ fullnode_index)
as usize]
.name,
)
.await?;
}
self.cluster_swarm
.spawn_new_instance(InstanceConfig {
validator_group: ValidatorGroup::new_for_index(validator_index),
application_config: Fullnode(fullnode_config),
})
.await
})
});
let validators = try_join_all(validators).await?;
let fullnodes = try_join_all(fullnodes).await?;
Ok((validators, lsrs, vaults, fullnodes, waypoint))
}
async fn initialize_vault(&self, validator_index: u32, vault_node: &KubeNode) -> Result<()> {
let addr = vault_node.internal_ip.clone();
tokio::task::spawn_blocking(move || {
let pod_name = validator_pod_name(validator_index);
let mut vault_storage = Storage::from(Namespaced::new(
&pod_name,
Box::new(Storage::from(VaultStorage::new(
format!("http://{}:{}", addr, VAULT_PORT),
VAULT_TOKEN.to_string(),
None,
None,
true,
None,
None,
))),
));
if validator_index == 0 {
vault_storage.create_key(DIEM_ROOT_KEY).map_err(|e| {
format_err!("Failed to create {}__{} : {}", pod_name, DIEM_ROOT_KEY, e)
})?;
let key = vault_storage
.export_private_key(DIEM_ROOT_KEY)
.map_err(|e| {
format_err!("Failed to export {}__{} : {}", pod_name, DIEM_ROOT_KEY, e)
})?;
vault_storage
.import_private_key(TREASURY_COMPLIANCE_KEY, key)
.map_err(|e| {
format_err!(
"Failed to import {}__{} : {}",
pod_name,
TREASURY_COMPLIANCE_KEY,
e
)
})?;
}
let keys = vec![
OWNER_KEY,
OPERATOR_KEY,
CONSENSUS_KEY,
EXECUTION_KEY,
VALIDATOR_NETWORK_KEY,
FULLNODE_NETWORK_KEY,
];
for key in keys {
vault_storage
.create_key(key)
.map_err(|e| format_err!("Failed to create {}__{} : {}", pod_name, key, e))?;
}
vault_storage
.set(SAFETY_DATA, SafetyData::new(0, 0, 0, 0, None))
.map_err(|e| format_err!("Failed to create {}/{}: {}", pod_name, SAFETY_DATA, e))?;
vault_storage
.set(WAYPOINT, Waypoint::default())
.map_err(|e| format_err!("Failed to create {}/{} : {}", pod_name, WAYPOINT, e))?;
vault_storage
.set(GENESIS_WAYPOINT, Waypoint::default())
.map_err(|e| format_err!("Failed to create {}/{} : {}", pod_name, WAYPOINT, e))?;
diem_network_address_encryption::Encryptor::new(vault_storage)
.initialize_for_testing()
.map_err(|e| {
format_err!(
"Failed to create {}/{} : {}",
pod_name,
VALIDATOR_NETWORK_ADDRESS_KEYS,
e
)
})?;
Ok::<(), anyhow::Error>(())
})
.await??;
Ok(())
}
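    /// Run the genesis ceremony against the per-validator Vaults: write the
    /// layout, register owner/operator keys and validator configs, build the
    /// genesis blob and waypoint, and extract the mint key to `/tmp/mint.key`.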
async fn generate_genesis(
&self,
num_validators: u32,
vault_nodes: &[KubeNode],
validator_nodes: &[KubeNode],
fullnode_nodes: &[KubeNode],
move_modules_dir: &str,
) -> Result<Waypoint> {
let genesis_helper = GenesisHelper::new("/tmp/genesis.json");
let owners: Vec<_> = (0..num_validators).map(validator_pod_name).collect();
let layout = Layout {
owners: owners.clone(),
operators: owners,
diem_root: DIEM_ROOT_NS.to_string(),
treasury_compliance: DIEM_ROOT_NS.to_string(),
};
let layout_path = "/tmp/layout.yaml";
write!(
File::create(layout_path).map_err(|e| format_err!(
"Failed to create {} : {}",
layout_path,
e
))?,
"{}",
toml::to_string(&layout)?
)
.map_err(|e| format_err!("Failed to write {} : {}", layout_path, e))?;
let token_path = "/tmp/token";
write!(
File::create(token_path).map_err(|e| format_err!(
"Failed to create {} : {}",
token_path,
e
))?,
"{}",
VAULT_TOKEN
)
.map_err(|e| format_err!("Failed to write {} : {}", token_path, e))?;
genesis_helper
.set_layout(layout_path, "common")
.await
.map_err(|e| format_err!("Failed to set_layout : {}", e))?;
genesis_helper
.set_move_modules(move_modules_dir, "common")
.await
.map_err(|e| format_err!("Failed to set_move_modules : {}", e))?;
genesis_helper
.diem_root_key(
VAULT_BACKEND,
format!("http://{}:{}", vault_nodes[0].internal_ip, VAULT_PORT).as_str(),
token_path,
DIEM_ROOT_NS,
DIEM_ROOT_NS,
)
.await
.map_err(|e| format_err!("Failed to diem_root_key : {}", e))?;
genesis_helper
.treasury_compliance_key(
VAULT_BACKEND,
format!("http://{}:{}", vault_nodes[0].internal_ip, VAULT_PORT).as_str(),
token_path,
DIEM_ROOT_NS,
DIEM_ROOT_NS,
)
.await
.map_err(|e| format_err!("Failed to diem_root_key : {}", e))?;
for (i, node) in vault_nodes.iter().enumerate() {
let pod_name = validator_pod_name(i as u32);
genesis_helper
.owner_key(
VAULT_BACKEND,
format!("http://{}:{}", node.internal_ip, VAULT_PORT).as_str(),
token_path,
&pod_name,
&pod_name,
)
.await
.map_err(|e| format_err!("Failed to owner_key for {} : {}", pod_name, e))?;
genesis_helper
.operator_key(
VAULT_BACKEND,
format!("http://{}:{}", node.internal_ip, VAULT_PORT).as_str(),
token_path,
&pod_name,
&pod_name,
)
.await
.map_err(|e| format_err!("Failed to operator_key for {} : {}", pod_name, e))?;
let fullnode_ip = if fullnode_nodes.is_empty() {
"0.0.0.0"
} else {
&fullnode_nodes[i].internal_ip
};
genesis_helper
.validator_config(
&pod_name,
NetworkAddress::from_str(
format!("/ip4/{}/tcp/{}", validator_nodes[i].internal_ip, 6180).as_str(),
)
.expect("Failed to parse network address"),
NetworkAddress::from_str(format!("/ip4/{}/tcp/{}", fullnode_ip, 6182).as_str())
.expect("Failed to parse network address"),
ChainId::test(),
VAULT_BACKEND,
format!("http://{}:{}", node.internal_ip, VAULT_PORT).as_str(),
token_path,
&pod_name,
&pod_name,
)
.await
.map_err(|e| format_err!("Failed to validator_config for {} : {}", pod_name, e))?;
genesis_helper
.set_operator(&pod_name, &pod_name)
.await
.map_err(|e| format_err!("Failed to set_operator for {} : {}", pod_name, e))?;
}
genesis_helper
.genesis(ChainId::test(), Path::new(GENESIS_PATH))
.await?;
let waypoint = genesis_helper
.create_waypoint(ChainId::test())
.await
.map_err(|e| format_err!("Failed to create_waypoint : {}", e))?;
for (i, node) in vault_nodes.iter().enumerate() {
let pod_name = validator_pod_name(i as u32);
genesis_helper
.create_and_insert_waypoint(
ChainId::test(),
VAULT_BACKEND,
format!("http://{}:{}", node.internal_ip, VAULT_PORT).as_str(),
token_path,
&pod_name,
)
.await
.map_err(|e| {
format_err!(
"Failed to create_and_insert_waypoint for {} : {}",
pod_name,
e
)
})?;
}
genesis_helper
.extract_private_key(
format!("{}__{}", DIEM_ROOT_NS, DIEM_ROOT_KEY).as_str(),
"/tmp/mint.key",
VAULT_BACKEND,
format!("http://{}:{}", vault_nodes[0].internal_ip, VAULT_PORT).as_str(),
token_path,
)
.await
.map_err(|e| format_err!("Failed to extract_private_key : {}", e))?;
Ok(waypoint)
}
}
| 38.726537 | 114 | 0.486149 |
e8565a64eff666068387bdad00ee505f481e1bb6 | 7,963 | //! Transaction fees.
use std::collections::HashMap;
use exonum::crypto::PublicKey;
use exonum::storage::{Fork, Snapshot};
use currency::assets;
use currency::assets::{AssetBundle, MetaAsset, TradeAsset};
use currency::configuration::Configuration;
use currency::error::Error;
use currency::wallet;
use currency::wallet::Wallet;
use currency::Service;
/// For exchange transactions, determines who shall pay the fees.
#[repr(u8)]
#[derive(PartialEq, Eq)]
pub enum FeeStrategy {
/// Recipient pays.
Recipient = 1,
/// Sender pays.
Sender = 2,
/// Recipient and sender share paying the fee.
RecipientAndSender = 3,
/// Intermediary pays.
Intermediary = 4,
}
impl FeeStrategy {
    /// Try converting from a u8. To be replaced when the `TryFrom` trait
    /// is stabilised.
pub fn try_from(value: u8) -> Option<Self> {
match value {
1 => Some(FeeStrategy::Recipient),
2 => Some(FeeStrategy::Sender),
3 => Some(FeeStrategy::RecipientAndSender),
4 => Some(FeeStrategy::Intermediary),
_ => None,
}
}
}
/// Transaction fees.
pub struct ThirdPartyFees(pub HashMap<PublicKey, u64>);
impl ThirdPartyFees {
/// Create `ThirdPartyFees` for an `add_assets` transaction.
pub fn new_add_assets<S, I>(view: S, assets: I) -> Result<ThirdPartyFees, Error>
where
S: AsRef<Snapshot>,
I: IntoIterator<Item = MetaAsset>,
{
let fees_config = Configuration::extract(view.as_ref()).fees();
let per_asset = fees_config.add_assets_per_entry();
let assets_fee = assets
.into_iter()
.map(|meta| meta.amount() * per_asset)
.sum();
let to_third_party = Some((Service::genesis_wallet(view), assets_fee))
.into_iter()
.collect();
let fees = ThirdPartyFees(to_third_party);
Ok(fees)
}
    /// Create `ThirdPartyFees` for a `delete_assets` transaction.
pub fn new_delete_assets<S, I>(_view: S, _assets: I) -> Result<ThirdPartyFees, Error>
where
S: AsRef<Snapshot>,
I: IntoIterator<Item = AssetBundle>,
{
let to_third_party = HashMap::new();
let fees = ThirdPartyFees(to_third_party);
Ok(fees)
}
/// Create `ThirdPartyFees` for `trade` transactions.
pub fn new_trade<'a, S, I>(view: S, assets: I) -> Result<ThirdPartyFees, Error>
where
S: AsRef<Snapshot>,
I: IntoIterator<Item = &'a TradeAsset>,
<I as IntoIterator>::IntoIter: Clone,
{
let view = view.as_ref();
let assets = assets.into_iter();
let mut to_third_party = HashMap::new();
for asset in assets {
let info = assets::Schema(view)
.fetch(&asset.id())
.ok_or_else(|| Error::AssetNotFound)?;
            let fee = info.fees().trade().for_price(asset.price()) * asset.amount();
to_third_party
.entry(*info.creator())
.and_modify(|prev_fee| {
*prev_fee += fee;
})
.or_insert(fee);
}
let fees = ThirdPartyFees(to_third_party);
Ok(fees)
}
/// Create `ThirdPartyFees` for `exchange` transactions.
pub fn new_exchange<S, I>(view: S, assets: I) -> Result<Self, Error>
where
S: AsRef<Snapshot>,
I: IntoIterator<Item = AssetBundle>,
{
let view = view.as_ref();
let mut to_third_party = HashMap::new();
for asset in assets {
let info = assets::Schema(view)
.fetch(&asset.id())
.ok_or_else(|| Error::AssetNotFound)?;
let fee = info.fees().exchange().fixed() * asset.amount();
to_third_party
.entry(*info.creator())
.and_modify(|prev_fee| {
*prev_fee += fee;
})
.or_insert(fee);
}
let fees = ThirdPartyFees(to_third_party);
Ok(fees)
}
/// Create `ThirdPartyFees` for `transfer` transactions.
pub fn new_transfer<S, I>(view: S, assets: I) -> Result<Self, Error>
where
S: AsRef<Snapshot>,
I: IntoIterator<Item = AssetBundle>,
{
let view = view.as_ref();
let mut to_third_party = HashMap::new();
for asset in assets {
let info = assets::Schema(view)
.fetch(&asset.id())
.ok_or_else(|| Error::AssetNotFound)?;
let fee = info.fees().transfer().fixed() * asset.amount();
to_third_party
.entry(*info.creator())
.and_modify(|prev_fee| {
*prev_fee += fee;
})
.or_insert(fee);
}
let fees = ThirdPartyFees(to_third_party);
Ok(fees)
}
    /// Total amount that needs to be paid to third-party wallets.
pub fn total(&self) -> u64 {
self.0.values().sum()
}
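    /// Total amount owed to third parties other than the given wallet
    /// (fees payable to `pub_key` itself are excluded).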
pub fn total_for_wallet(&self, pub_key: &PublicKey) -> u64 {
self.0
.iter()
.filter_map(|(key, fee)| if key != pub_key { Some(fee) } else { None })
.sum()
}
/// Add a new fee to the list of third party payments.
pub fn add_fee(&mut self, key: &PublicKey, fee: u64) {
self.0
.entry(*key)
.and_modify(|prev_fee| {
*prev_fee += fee;
})
.or_insert(fee);
}
/// Collect fees to third party wallets.
///
/// Returns a list of wallets modified by fee withdrawal.
    /// These wallets should not be committed or discarded until the
    /// rest of the transaction has executed successfully.
///
/// # Errors
/// Returns `InsufficientFunds` if the payer is unable to pay the fees.
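    ///
    /// Example (sketch; `fork`, `payer_key` and `assets` are hypothetical
    /// bindings):
    ///
    /// ```ignore
    /// let fees = ThirdPartyFees::new_transfer(&fork, assets)?;
    /// let updated = fees.collect(&fork, &payer_key)?;
    /// // Persist `updated` only after the rest of the transaction succeeds.
    /// ```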
pub fn collect(
&self,
view: &Fork,
payer_key: &PublicKey,
) -> Result<HashMap<PublicKey, Wallet>, Error> {
let mut payer = wallet::Schema(&*view).fetch(&payer_key);
let mut updated_wallets = self.0
.iter()
.filter(|&(key, _)| key != payer_key)
.map(|(key, fee)| {
let mut wallet = wallet::Schema(&*view).fetch(key);
wallet::move_coins(&mut payer, &mut wallet, *fee)?;
Ok((*key, wallet))
})
.collect::<Result<HashMap<_, _>, _>>()?;
updated_wallets.entry(*payer_key).or_insert(payer);
Ok(updated_wallets)
}
/// Split fees to third party wallets between two payers.
pub fn collect2(
&self,
view: &mut Fork,
payer_key_1: &PublicKey,
payer_key_2: &PublicKey,
) -> Result<HashMap<PublicKey, Wallet>, Error> {
let mut payer_1 = wallet::Schema(&*view).fetch(&payer_key_1);
let mut payer_2 = wallet::Schema(&*view).fetch(&payer_key_2);
let mut to_third_party = self.0.clone();
if let Some(fee) = to_third_party.remove(payer_key_1) {
wallet::move_coins(&mut payer_2, &mut payer_1, fee / 2)?;
}
if let Some(fee) = to_third_party.remove(payer_key_2) {
wallet::move_coins(&mut payer_1, &mut payer_2, fee / 2)?;
}
let mut updated_wallets = to_third_party
.iter()
.map(|(key, fee)| {
let mut wallet = wallet::Schema(&*view).fetch(&key);
wallet::move_coins(&mut payer_1, &mut wallet, fee / 2)?;
wallet::move_coins(&mut payer_2, &mut wallet, fee / 2)?;
Ok((*key, wallet))
})
.collect::<Result<HashMap<_, _>, _>>()?;
updated_wallets.insert(*payer_key_1, payer_1);
updated_wallets.insert(*payer_key_2, payer_2);
Ok(updated_wallets)
}
}
pub trait FeesCalculator {
fn calculate_fees(&self, view: &mut Fork) -> Result<HashMap<PublicKey, u64>, Error>;
}
| 30.162879 | 89 | 0.555444 |
145b913902ae6a25719d441ab2a7b62f35c5f3a6 | 1,350 | use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
fn some_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("overhead");
group.bench_function("iter", |b| b.iter(|| 1));
group.bench_function("iter_with_setup", |b| b.iter_with_setup(|| (), |_| 1));
group.bench_function("iter_with_large_setup", |b| {
b.iter_with_large_setup(|| (), |_| 1)
});
group.bench_function("iter_with_large_drop", |b| b.iter_with_large_drop(|| 1));
group.bench_function("iter_batched_small_input", |b| {
b.iter_batched(|| (), |_| 1, BatchSize::SmallInput)
});
group.bench_function("iter_batched_large_input", |b| {
b.iter_batched(|| (), |_| 1, BatchSize::LargeInput)
});
group.bench_function("iter_batched_per_iteration", |b| {
b.iter_batched(|| (), |_| 1, BatchSize::PerIteration)
});
group.bench_function("iter_batched_ref_small_input", |b| {
b.iter_batched_ref(|| (), |_| 1, BatchSize::SmallInput)
});
group.bench_function("iter_batched_ref_large_input", |b| {
b.iter_batched_ref(|| (), |_| 1, BatchSize::LargeInput)
});
group.bench_function("iter_batched_ref_per_iteration", |b| {
b.iter_batched_ref(|| (), |_| 1, BatchSize::PerIteration)
});
group.finish();
}
criterion_group!(benches, some_benchmark);
criterion_main!(benches);
| 40.909091 | 84 | 0.63037 |
69bf4e0b0107a99b25a1ab5779ecef54260a2de8 | 2,889 | /// floki - the development container launcher
#[macro_use]
extern crate log;
mod cli;
mod command;
mod config;
mod dind;
mod environment;
mod errors;
mod image;
mod interpret;
mod spec;
mod volumes;
use anyhow::Error;
use cli::{Cli, Subcommand};
use config::FlokiConfig;
use environment::Environment;
use structopt::StructOpt;
fn main() -> Result<(), Error> {
let args = Cli::from_args();
configure_logging(args.verbosity)?;
match run_floki_from_args(&args) {
Ok(()) => (),
Err(e) => {
error!("A problem occurred: {}", e);
std::process::exit(1);
}
}
Ok(())
}
/// Decide which commands to run given the input from the shell
fn run_floki_from_args(args: &Cli) -> Result<(), Error> {
debug!("Got command line arguments: {:?}", &args);
if args.local {
warn!("-l/--local is deprecated and may be removed in a future release");
}
// Dispatch appropriate subcommand
match &args.subcommand {
// Pull the image in the configuration file
Some(Subcommand::Pull {}) => {
let env = Environment::gather(&args.config_file)?;
let config = FlokiConfig::from_file(&env.config_file)?;
image::pull_image(&config.image.name()?)
}
// Run a command in the floki container
Some(Subcommand::Run { command }) => {
let env = Environment::gather(&args.config_file)?;
let config = FlokiConfig::from_file(&env.config_file)?;
let inner_command = interpret::command_in_shell(config.shell.inner_shell(), &command);
interpret::run_floki_container(&spec::FlokiSpec::from(config, env)?, &inner_command)
}
Some(Subcommand::Completion { shell }) => {
Cli::clap().gen_completions_to("floki", *shell, &mut std::io::stdout());
Ok(())
}
// Launch an interactive floki shell (the default)
None => {
let env = Environment::gather(&args.config_file)?;
let config = FlokiConfig::from_file(&env.config_file)?;
let inner_command = config.shell.inner_shell().to_string();
interpret::run_floki_container(&spec::FlokiSpec::from(config, env)?, &inner_command)
}
}
}
/// Configure the logger
fn configure_logging(verbosity: u8) -> Result<(), Error> {
let level = match verbosity {
0 => log::LevelFilter::Warn,
1 => log::LevelFilter::Info,
2 => log::LevelFilter::Debug,
3 => log::LevelFilter::Trace,
_ => {
return Err(
errors::FlokiUserError::InvalidVerbositySetting { setting: verbosity }.into(),
)
}
};
simplelog::TermLogger::init(
level,
simplelog::Config::default(),
simplelog::TerminalMode::Stderr,
simplelog::ColorChoice::Auto,
)?;
Ok(())
}
| 29.783505 | 98 | 0.591554 |
564fb69a1befec651b89de2e445cf452f6c32ae5 | 1,175 | #[doc = "Reader of register RETXCNTRMI"]
pub type R = crate::R<u32, super::RETXCNTRMI>;
#[doc = "Writer for register RETXCNTRMI"]
pub type W = crate::W<u32, super::RETXCNTRMI>;
#[doc = "Register RETXCNTRMI `reset()`'s with value 0"]
impl crate::ResetValue for super::RETXCNTRMI {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `RETXCNTRMI`"]
pub type RETXCNTRMI_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `RETXCNTRMI`"]
pub struct RETXCNTRMI_W<'a> {
w: &'a mut W,
}
impl<'a> RETXCNTRMI_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !0x0f) | ((value as u32) & 0x0f);
self.w
}
}
impl R {
#[doc = "Bits 0:3 - Resent count inquiry register."]
#[inline(always)]
pub fn retxcntrmi(&self) -> RETXCNTRMI_R {
RETXCNTRMI_R::new((self.bits & 0x0f) as u8)
}
}
impl W {
#[doc = "Bits 0:3 - Resent count inquiry register."]
#[inline(always)]
pub fn retxcntrmi(&mut self) -> RETXCNTRMI_W {
RETXCNTRMI_W { w: self }
}
}
| 28.658537 | 70 | 0.597447 |
9b0078819def7f66d1b4dd9b84d7a450114f5f67 | 922 | // Copyright 2020 Andy Grove
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Cast an Arrow Array to its expected type
#[macro_export]
macro_rules! cast_array {
($SELF:ident, $ARRAY_TYPE:ident) => {{
match $SELF.as_any().downcast_ref::<array::$ARRAY_TYPE>() {
Some(array) => Ok(array),
None => Err(ballista_error("Failed to cast array to expected type")),
}
}};
}
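// Example use (sketch): downcast an `ArrayRef` named `array` to a concrete
// Arrow array type; `Int32Array` and `array` are assumed to be in scope.
//
//     let ints = cast_array!(array, Int32Array)?;
//     let first = ints.value(0);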
| 36.88 | 81 | 0.691974 |
dea856206c6b8b4335905ef779b01035bac6dd7d | 5,762 | // Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Common macros for crypto module.
macro_rules! implement_public_crypto_wrapper {
($(#[$attr:meta])* struct $name:ident, $size:expr) => {
/// Cryptographic primitive implementation newtype.
#[derive(PartialEq, Eq, Clone, Copy, PartialOrd, Ord, Hash)]
$(#[$attr])*
pub struct $name($crate::crypto::crypto_impl::$name);
impl $name {
/// Creates a new instance filled with zeros.
pub fn zero() -> Self {
$name::new([0; $size])
}
}
impl $name {
/// Creates a new instance from bytes array.
pub fn new(bytes_array: [u8; $size]) -> Self {
$name($crate::crypto::crypto_impl::$name(bytes_array))
}
/// Creates a new instance from bytes slice.
pub fn from_slice(bytes_slice: &[u8]) -> Option<Self> {
$crate::crypto::crypto_impl::$name::from_slice(bytes_slice).map($name)
}
/// Copies bytes from this instance.
pub fn as_bytes(&self) -> [u8; $size] {
(self.0).0
}
/// Returns a hex representation of binary data.
/// Lower case letters are used (e.g. `f9b4ca`).
pub fn to_hex(&self) -> String {
$crate::crypto::encode_hex(self)
}
}
impl AsRef<[u8]> for $name {
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
}
}
impl Default for $name {
fn default() -> Self {
Self::zero()
}
}
impl fmt::Debug for $name {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut hex = String::with_capacity($crate::crypto::BYTES_IN_DEBUG + $crate::crypto::BYTES_IN_ELLIPSIS);
$crate::crypto::write_short_hex(&mut hex, &self[..])?;
f.debug_tuple(stringify!($name))
.field(&hex)
.finish()
}
}
impl fmt::Display for $name {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(&self.to_hex())
}
}
impl std::str::FromStr for $name {
type Err = hex::FromHexError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Self::from_hex(s)
}
}
};
}
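// Example invocation (sketch): declare a 32-byte public-key wrapper.
//
//     implement_public_crypto_wrapper! {
//         /// Ed25519 public key.
//         struct PublicKey, 32
//     }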
macro_rules! implement_serde {
($name:ident) => {
impl FromHex for $name {
type Error = FromHexError;
fn from_hex<T: AsRef<[u8]>>(v: T) -> Result<Self, Self::Error> {
let bytes = Vec::<u8>::from_hex(v)?;
if let Some(self_value) = Self::from_slice(bytes.as_ref()) {
Ok(self_value)
} else {
Err(FromHexError::InvalidStringLength)
}
}
}
impl Serialize for $name {
fn serialize<S>(&self, ser: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let hex_string = encode_hex(&self[..]);
ser.serialize_str(&hex_string)
}
}
impl<'de> Deserialize<'de> for $name {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct HexVisitor;
impl<'v> Visitor<'v> for HexVisitor {
type Value = $name;
fn expecting(&self, fmt: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(fmt, "expecting str.")
}
fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
$name::from_hex(s).map_err(|_| de::Error::custom("Invalid hex"))
}
}
deserializer.deserialize_str(HexVisitor)
}
}
};
}
macro_rules! implement_index_traits {
($new_type:ident) => {
impl Index<Range<usize>> for $new_type {
type Output = [u8];
fn index(&self, _index: Range<usize>) -> &[u8] {
let inner = &self.0;
inner.0.index(_index)
}
}
impl Index<RangeTo<usize>> for $new_type {
type Output = [u8];
fn index(&self, _index: RangeTo<usize>) -> &[u8] {
let inner = &self.0;
inner.0.index(_index)
}
}
impl Index<RangeFrom<usize>> for $new_type {
type Output = [u8];
fn index(&self, _index: RangeFrom<usize>) -> &[u8] {
let inner = &self.0;
inner.0.index(_index)
}
}
impl Index<RangeFull> for $new_type {
type Output = [u8];
fn index(&self, _index: RangeFull) -> &[u8] {
let inner = &self.0;
inner.0.index(_index)
}
}
};
}
| 32.925714 | 120 | 0.47848 |
79d020fae89d2f2a6413b8bfeb4803de6d5c310e | 146 | extern crate lolbench;

#[test]
fn end_to_end() {
    lolbench::end_to_end_test("byteorder_1_2_6", "uint128_12::read_little_endian");
} | 48.666667 | 60 | 0.691781 |
16470f1ff02f4092fa72cdccb7c1face91f74af4 | 2,074 | use std::error::Error;
use std::thread;
use std::time::Duration;
use blurz::{BluetoothAdapter, BluetoothDevice, BluetoothDiscoverySession, BluetoothSession};
use ruuvi_sensor_protocol::{Acceleration, AccelerationVector, SensorValues};
static RUUVI_MANUFACTURER_ID: u16 = 0x0499;
static DISCOVERY_WAIT: u64 = 2;
#[derive(Debug)]
pub struct RuuviDevice {
id: String,
acceleration: AccelerationVector,
}
impl RuuviDevice {
fn new(device: BluetoothDevice) -> Option<Self> {
device
.get_manufacturer_data()
.ok()
            .and_then(|data| data.get(&RUUVI_MANUFACTURER_ID).cloned())
.and_then(|data| {
SensorValues::from_manufacturer_specific_data(RUUVI_MANUFACTURER_ID, &data).ok()
})
.and_then(|values| values.acceleration_vector_as_milli_g())
.and_then(|acc| {
let id = match device.get_alias() {
Ok(name) => name,
Err(_e) => device.get_id(),
};
Some(RuuviDevice {
id,
acceleration: acc.clone(),
})
})
}
pub fn get_acceleration(&self) -> AccelerationVector {
self.acceleration
}
pub fn get_id(&self) -> String {
self.id.clone()
}
}
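/// Scan for nearby Bluetooth devices for `DISCOVERY_WAIT` seconds and return
/// the Ruuvi tags found, with their advertised acceleration data.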
pub fn find_ruuvi_devices() -> Result<Vec<RuuviDevice>, Box<dyn Error>> {
let session = BluetoothSession::create_session(None)?;
let adapter = BluetoothAdapter::init(&session)?;
let discovery_session = BluetoothDiscoverySession::create_session(&session, adapter.get_id())?;
discovery_session.start_discovery()?;
thread::sleep(Duration::from_secs(DISCOVERY_WAIT));
let rv = adapter.get_device_list().map(|device_ids| {
device_ids
.iter()
.map(|device_id| BluetoothDevice::new(&session, device_id.to_string()))
            .filter_map(RuuviDevice::new)
.collect::<Vec<RuuviDevice>>()
});
discovery_session.stop_discovery()?;
rv
}
| 32.40625 | 99 | 0.608004 |
9cd375d6f08d8f6746c11538c6974c16bf6e3037 | 12,548 | // Currently, rust warns when an unsafe fn contains an unsafe {} block. However,
// in the future, this will change to the reverse. For now, suppress this
// warning and generally stick with being explicit about unsafety.
#![allow(unused_unsafe)]
#![cfg_attr(not(feature = "rt"), allow(dead_code))]
//! Time driver
mod entry;
pub(self) use self::entry::{EntryList, TimerEntry, TimerHandle, TimerShared};
mod handle;
pub(crate) use self::handle::Handle;
mod wheel;
pub(super) mod sleep;
use crate::driver::Driver;
use crate::time::error::Error;
use crate::time::{Clock, Duration, Instant};
use std::cell::RefCell;
use std::convert::TryInto;
use std::rc::Rc;
use std::{fmt, io};
use std::{num::NonZeroU64, ptr::NonNull};
/// Time implementation that drives [`Sleep`][sleep], [`Interval`][interval], and [`Timeout`][timeout].
///
/// A `Driver` instance tracks the state necessary for managing time and
/// notifying the [`Sleep`][sleep] instances once their deadlines are reached.
///
/// It is expected that a single instance manages many individual [`Sleep`][sleep]
/// instances. This implementation keeps its shared state in `Rc`/`RefCell`,
/// so it is intended to be driven from a single thread.
///
/// After creating the `Driver` instance, the caller must repeatedly call `park`
/// or `park_timeout`. The time driver will perform no work unless `park` or
/// `park_timeout` is called repeatedly.
///
/// The driver has a resolution of one millisecond. Any deadline that falls
/// between milliseconds is rounded up to the next millisecond.
///
/// When an instance is dropped, any outstanding [`Sleep`][sleep] instance that has not
/// elapsed will be notified with an error. At this point, calling `poll` on the
/// [`Sleep`][sleep] instance will result in panic.
///
/// # Implementation
///
/// The time driver is based on the [paper by Varghese and Lauck][paper].
///
/// A hashed timing wheel is a vector of slots, where each slot handles a time
/// slice. As time progresses, the timer walks over the slot for the current
/// instant, and processes each entry for that slot. When the timer reaches the
/// end of the wheel, it starts again at the beginning.
///
/// The implementation maintains six wheels arranged in a set of levels. As the
/// levels go up, the slots of the associated wheel represent larger intervals
/// of time. At each level, the wheel has 64 slots. Each slot covers a range of
/// time equal to the wheel at the lower level. At level zero, each slot
/// represents one millisecond of time.
///
/// The wheels are:
///
/// * Level 0: 64 x 1 millisecond slots.
/// * Level 1: 64 x 64 millisecond slots.
/// * Level 2: 64 x ~4 second slots.
/// * Level 3: 64 x ~4 minute slots.
/// * Level 4: 64 x ~4 hour slots.
/// * Level 5: 64 x ~12 day slots.
///
/// When the timer processes entries at level zero, it will notify all the
/// `Sleep` instances as their deadlines have been reached. For all higher
/// levels, all entries will be redistributed across the wheel at the next level
/// down. Eventually, as time progresses, entries with [`Sleep`][sleep] instances will
/// either be canceled (dropped) or their associated entries will reach level
/// zero and be notified.
///
/// [paper]: http://www.cs.columbia.edu/~nahum/w6998/papers/ton97-timing-wheels.pdf
/// [sleep]: crate::time::Sleep
/// [timeout]: crate::time::Timeout
/// [interval]: crate::time::Interval
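///
/// # Level selection (illustrative sketch)
///
/// As an illustration only (not part of this module's API), the level a
/// deadline `ticks` milliseconds in the future lands on follows from its
/// most significant bit, six bits per level:
///
/// ```ignore
/// fn level_for_sketch(ticks: u64) -> usize {
///     debug_assert!(ticks > 0);
///     let significant = 63 - ticks.leading_zeros() as usize;
///     significant / 6 // e.g. 1ms -> level 0, 100ms -> 1, ~90s -> 2
/// }
/// ```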
#[derive(Debug)]
pub struct TimeDriver<D: Driver + 'static> {
/// Timing backend in use
time_source: ClockTime,
/// Shared state
pub(crate) handle: Handle,
/// Parker to delegate to
park: D,
}
/// A structure which handles conversion from Instants to u64 timestamps.
#[derive(Debug, Clone)]
pub(self) struct ClockTime {
clock: super::clock::Clock,
start_time: Instant,
}
impl ClockTime {
pub(self) fn new(clock: Clock) -> Self {
Self {
start_time: clock.now(),
clock,
}
}
pub(self) fn deadline_to_tick(&self, t: Instant) -> u64 {
// Round up to the end of a ms
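        // (e.g. a deadline 10.2 ms past `start_time` becomes tick 11)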
self.instant_to_tick(t + Duration::from_nanos(999_999))
}
pub(self) fn instant_to_tick(&self, t: Instant) -> u64 {
// round up
let dur: Duration = t
.checked_duration_since(self.start_time)
.unwrap_or_else(|| Duration::from_secs(0));
let ms = dur.as_millis();
ms.try_into().expect("Duration too far into the future")
}
pub(self) fn tick_to_duration(&self, t: u64) -> Duration {
Duration::from_millis(t)
}
pub(self) fn now(&self) -> u64 {
self.instant_to_tick(self.clock.now())
}
}
/// Timer state shared between `Driver`, `Handle`, and `Registration`.
struct Inner {
    // The state lives behind a `RefCell`: this driver is only ever driven
    // from a single thread, so no `Mutex` is needed.
pub(super) state: RefCell<InnerState>,
}
/// Time state shared through `Inner` and guarded by its `RefCell`.
struct InnerState {
/// Timing backend in use
time_source: ClockTime,
/// The last published timer `elapsed` value.
elapsed: u64,
/// The earliest time at which we promise to wake up without unparking
next_wake: Option<NonZeroU64>,
/// Timer wheel
wheel: wheel::Wheel,
}
// ===== impl Driver =====
impl<D> TimeDriver<D>
where
D: Driver + 'static,
{
/// Creates a new `Driver` instance that uses `park` to block the current
/// thread and `time_source` to get the current time and convert to ticks.
///
/// Specifying the source of time is useful when testing.
pub(crate) fn new(park: D, clock: Clock) -> TimeDriver<D> {
let time_source = ClockTime::new(clock);
let inner = Inner::new(time_source.clone());
TimeDriver {
time_source,
handle: Handle::new(Rc::new(inner)),
park,
}
}
/// Returns a handle to the timer.
///
/// The `Handle` is how `Sleep` instances are created. The `Sleep` instances
/// can either be created directly or the `Handle` instance can be passed to
/// `with_default`, setting the timer as the default timer for the execution
/// context.
pub(crate) fn handle(&self) -> Handle {
self.handle.clone()
}
fn park_internal(&self, limit: Option<Duration>) -> io::Result<()> {
let mut inner_state = self.handle.get().state.borrow_mut();
let next_wake = inner_state.wheel.next_expiration_time();
inner_state.next_wake =
next_wake.map(|t| NonZeroU64::new(t).unwrap_or_else(|| NonZeroU64::new(1).unwrap()));
drop(inner_state);
match next_wake {
Some(when) => {
let now = self.time_source.now();
// Note that we effectively round up to 1ms here - this avoids
// very short-duration microsecond-resolution sleeps that the OS
// might treat as zero-length.
let mut duration = self.time_source.tick_to_duration(when.saturating_sub(now));
if duration > Duration::from_millis(0) {
if let Some(limit) = limit {
duration = std::cmp::min(limit, duration);
}
self.park.park_timeout(duration)?;
} else {
self.park.park_timeout(Duration::from_secs(0))?;
}
}
None => {
if let Some(duration) = limit {
self.park.park_timeout(duration)?;
} else {
self.park.park()?;
}
}
}
// Process pending timers after waking up
self.handle.process();
Ok(())
}
}
impl Handle {
/// Runs timer related logic, and returns the next wakeup time
pub(self) fn process(&self) {
let now = self.time_source().now();
self.process_at_time(now)
}
pub(self) fn process_at_time(&self, mut now: u64) {
let mut state = self.get().state.borrow_mut();
if now < state.elapsed {
// Time went backwards! This normally shouldn't happen as the Rust language
// guarantees that an Instant is monotonic, but can happen when running
// Linux in a VM on a Windows host due to std incorrectly trusting the
// hardware clock to be monotonic.
//
// See <https://github.com/tokio-rs/tokio/issues/3619> for more information.
now = state.elapsed;
}
while let Some(entry) = state.wheel.poll(now) {
if let Some(waker) = unsafe { entry.fire(Ok(())) } {
waker.wake();
}
}
state.elapsed = state.wheel.elapsed();
state.next_wake = state
.wheel
.poll_at()
.map(|t| NonZeroU64::new(t).unwrap_or_else(|| NonZeroU64::new(1).unwrap()));
}
/// Removes a registered timer from the driver.
///
/// The timer will be moved to the cancelled state. Wakers will _not_ be
/// invoked. If the timer is already completed, this function is a no-op.
///
/// This function always acquires the driver lock, even if the entry does
/// not appear to be registered.
///
/// SAFETY: The timer must not be registered with some other driver, and
/// `add_entry` must not be called concurrently.
pub(self) unsafe fn clear_entry(&self, entry: NonNull<TimerShared>) {
unsafe {
let mut state = self.get().state.borrow_mut();
if entry.as_ref().might_be_registered() {
state.wheel.remove(entry);
}
entry.as_ref().handle().fire(Ok(()));
}
}
/// Removes and re-adds an entry to the driver.
///
/// SAFETY: The timer must be either unregistered, or registered with this
/// driver. No other threads are allowed to concurrently manipulate the
/// timer at all (the current thread should hold an exclusive reference to
/// the `TimerEntry`)
pub(self) unsafe fn reregister(&self, new_tick: u64, entry: NonNull<TimerShared>) {
let waker = unsafe {
let mut state = self.get().state.borrow_mut();
// We may have raced with a firing/deregistration, so check before
// deregistering.
if unsafe { entry.as_ref().might_be_registered() } {
state.wheel.remove(entry);
}
// Now that we have exclusive control of this entry, mint a handle to reinsert it.
let entry = entry.as_ref().handle();
entry.set_expiration(new_tick);
// Note: We don't have to worry about racing with some other resetting
// thread, because add_entry and reregister require exclusive control of
// the timer entry.
match unsafe { state.wheel.insert(entry) } {
Ok(_) => None,
Err((entry, super::error::InsertError::Elapsed)) => unsafe { entry.fire(Ok(())) },
}
};
// The timer was fired synchronously as a result of the reregistration.
// Wake the waker; this is needed because we might reset _after_ a poll,
// and otherwise the task won't be awoken to poll again.
if let Some(waker) = waker {
waker.wake();
}
}
}
impl<D> Driver for TimeDriver<D>
where
D: Driver + 'static,
{
fn with<R>(&self, f: impl FnOnce() -> R) -> R {
self.park.with(f)
}
fn submit(&self) -> io::Result<()> {
self.park.submit()
}
fn park(&self) -> io::Result<()> {
self.park_internal(None)
}
#[cfg(feature = "sync")]
type Unpark = D::Unpark;
fn park_timeout(&self, duration: Duration) -> io::Result<()> {
self.park_internal(Some(duration))
}
#[cfg(feature = "sync")]
fn unpark(&self) -> Self::Unpark {
self.park.unpark()
}
}
impl<D> Drop for TimeDriver<D>
where
D: Driver + 'static,
{
fn drop(&mut self) {
// self.shutdown();
}
}
// ===== impl Inner =====
impl Inner {
pub(self) fn new(time_source: ClockTime) -> Self {
Inner {
state: RefCell::new(InnerState {
time_source,
elapsed: 0,
next_wake: None,
wheel: wheel::Wheel::new(),
}),
}
}
}
impl fmt::Debug for Inner {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("Inner").finish()
}
}
| 33.021053 | 103 | 0.606391 |
eb437531af29cbfe4d961e5e86edfb895d2c50d3 | 18,830 | // Copyright 2020 Andy Grove
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The DataFrame API is the main entry point into Ballista.
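//!
//! Example (sketch; the host, port and file path are hypothetical):
//!
//! ```ignore
//! let ctx = Context::remote("localhost", 50051, HashMap::new());
//! let df = ctx
//!     .read_csv("/data/sales.csv", CsvReadOptions::new())?
//!     .aggregate(vec![col("region")], vec![sum(col("amount"))])?;
//! let batches = df.collect().await?;
//! ```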
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use crate::arrow::datatypes::{DataType, Schema};
use crate::arrow::record_batch::RecordBatch;
pub use crate::datafusion::datasource::csv::CsvReadOptions;
use crate::datafusion::datasource::parquet::ParquetTable;
use crate::datafusion::datasource::TableProvider;
use crate::datafusion::logicalplan::ScalarValue;
use crate::datafusion::logicalplan::{exprlist_to_fields, Operator};
use crate::datafusion::logicalplan::{Expr, FunctionMeta, LogicalPlan, LogicalPlanBuilder};
use crate::datafusion::sql::parser::DFParser;
use crate::datafusion::sql::planner::{SchemaProvider, SqlToRel};
use crate::distributed::client;
use crate::error::{ballista_error, BallistaError, Result};
use crate::execution::physical_plan::Action;
pub const CSV_READER_BATCH_SIZE: &str = "ballista.csv.reader.batchSize";
pub const PARQUET_READER_BATCH_SIZE: &str = "ballista.parquet.reader.batchSize";
/// Configuration setting
#[derive(Debug, Clone)]
struct ConfigSetting {
key: String,
description: String,
data_type: DataType,
default_value: Option<String>,
}
impl ConfigSetting {
pub fn new(
key: &str,
description: &str,
data_type: DataType,
default_value: Option<&str>,
) -> Self {
Self {
key: key.to_owned(),
description: description.to_owned(),
data_type,
default_value: default_value.map(|s| s.to_owned()),
}
}
}
struct BallistaConfigs {
configs: HashMap<String, ConfigSetting>,
}
impl BallistaConfigs {
pub fn new() -> Self {
let mut configs = vec![];
configs.push(ConfigSetting::new(
CSV_READER_BATCH_SIZE,
"Number of rows to read per batch",
DataType::UInt64,
Some("65536"),
));
configs.push(ConfigSetting::new(
PARQUET_READER_BATCH_SIZE,
"Number of rows to read per batch",
DataType::UInt64,
Some("65536"),
));
let mut config_map: HashMap<String, ConfigSetting> = HashMap::new();
for config in &configs {
config_map.insert(config.key.to_owned(), config.to_owned());
}
Self {
configs: config_map,
}
}
pub fn validate(&self, settings: &HashMap<String, String>) -> Result<HashMap<String, String>> {
let mut checked_settings = settings.clone();
for config in &self.configs {
if !checked_settings.contains_key(config.0) {
match &config.1.default_value {
Some(default_value) => {
checked_settings.insert(config.0.to_string(), default_value.to_string());
}
None => {
return Err(ballista_error(&format!(
"Settings missing required config '{}'",
config.0
)))
}
}
}
// validate that any values are of the expected type
if let Some(value) = checked_settings.get(config.0) {
match config.1.data_type {
DataType::UInt64 => {
let _ = value.parse::<u64>().map_err(|e| {
ballista_error(&format!(
"Error parsing value {} for setting '{}' {:?}",
value, config.0, e
))
})?;
}
_ => return Err(ballista_error("unsupported data type for configs")),
}
}
}
Ok(checked_settings)
}
}
#[derive(Debug)]
pub struct ContextSchemaProvider {
pub temp_tables: HashMap<String, DataFrame>,
}
impl ContextSchemaProvider {
fn new() -> Self {
Self {
temp_tables: HashMap::new(),
}
}
}
impl ContextSchemaProvider {
pub fn register_temp_table(&mut self, name: &str, df: DataFrame) -> Result<()> {
self.temp_tables.insert(name.to_string(), df);
Ok(())
}
}
impl SchemaProvider for &ContextSchemaProvider {
fn get_table_meta(&self, name: &str) -> Option<Arc<Schema>> {
self.temp_tables
.get(name)
.map(|df| Arc::from(df.plan.schema().clone()))
}
fn get_function_meta(&self, _name: &str) -> Option<Arc<FunctionMeta>> {
// TODO: support udf
None
}
}
#[derive(Debug)]
pub struct Context {
pub state: Arc<ContextState>,
}
#[derive(Debug)]
pub struct ContextState {
pub schema_provider: RwLock<ContextSchemaProvider>,
pub backend: ContextBackend,
}
#[derive(Debug, Clone)]
pub enum ContextBackend {
Remote {
host: String,
port: usize,
settings: HashMap<String, String>,
},
Spark {
master: String,
spark_settings: HashMap<String, String>,
},
}
impl Context {
/// Create a context for executing a query against a remote Spark executor
pub fn spark(master: &str, settings: HashMap<&str, &str>) -> Self {
Self {
state: Arc::new(ContextState {
schema_provider: RwLock::new(ContextSchemaProvider::new()),
backend: ContextBackend::Spark {
master: master.to_owned(),
spark_settings: parse_settings(settings),
},
}),
}
}
/// Create a context for executing a query against a remote executor
pub fn remote(host: &str, port: usize, settings: HashMap<&str, &str>) -> Self {
Self {
state: Arc::new(ContextState {
schema_provider: RwLock::new(ContextSchemaProvider::new()),
backend: ContextBackend::Remote {
host: host.to_owned(),
port,
settings: parse_settings(settings),
},
}),
}
}
pub fn from(state: Arc<ContextState>) -> Self {
Self { state }
}
/// Create a DataFrame from an existing set of RecordBatch instances
pub fn create_dataframe(&self, batches: &[RecordBatch]) -> Result<DataFrame> {
let schema = batches[0].schema();
let plan = LogicalPlan::InMemoryScan {
data: vec![batches.to_vec()],
schema: Box::new(schema.as_ref().clone()),
projection: None,
projected_schema: Box::new(schema.as_ref().clone()),
};
Ok(DataFrame::from(self.state.clone(), plan))
}
pub fn read_csv(&self, path: &str, options: CsvReadOptions) -> Result<DataFrame> {
Ok(DataFrame::scan_csv(self.state.clone(), path, options)?)
}
pub fn read_parquet(&self, path: &str) -> Result<DataFrame> {
Ok(DataFrame::scan_parquet(self.state.clone(), path)?)
}
pub fn sql(&self, sql: &str) -> Result<DataFrame> {
let statements = DFParser::parse_sql(sql)?;
if statements.len() != 1 {
            return Err(BallistaError::NotImplemented(
                "The dataframe currently only supports a single SQL statement".to_string(),
            ));
}
let plan = SqlToRel::new(&*self.state.schema_provider.read().unwrap())
.statement_to_plan(&statements[0])?;
Ok(DataFrame::from(self.state.clone(), plan))
}
pub fn register_temp_table(&mut self, name: &str, df: DataFrame) -> Result<()> {
let mut provider = self.state.schema_provider.write().unwrap();
provider.register_temp_table(name, df)?;
Ok(())
}
pub async fn execute_action(
&self,
host: &str,
port: usize,
action: Action,
) -> Result<Vec<RecordBatch>> {
client::execute_action(host, port, &action).await
}
}
fn parse_settings(settings: HashMap<&str, &str>) -> HashMap<String, String> {
let mut s: HashMap<String, String> = HashMap::new();
for (k, v) in settings {
s.insert(k.to_owned(), v.to_owned());
}
s
}
/// Builder for logical plans
#[derive(Clone, Debug)]
pub struct DataFrame {
ctx_state: Arc<ContextState>,
plan: LogicalPlan,
}
impl DataFrame {
/// Create a builder from an existing plan
pub fn from(ctx_state: Arc<ContextState>, plan: LogicalPlan) -> Self {
Self { ctx_state, plan }
}
/// Create an empty relation
pub fn empty(ctx_state: Arc<ContextState>) -> Self {
Self::from(
ctx_state,
LogicalPlan::EmptyRelation {
schema: Box::new(Schema::empty()),
},
)
}
/// Scan a data source
pub fn scan_csv(
ctx_state: Arc<ContextState>,
path: &str,
options: CsvReadOptions,
) -> Result<Self> {
Ok(Self::from(
ctx_state,
LogicalPlanBuilder::scan_csv(path, options, None)?.build()?,
))
}
/// Scan a data source
pub fn scan_parquet(ctx_state: Arc<ContextState>, path: &str) -> Result<Self> {
let p = ParquetTable::try_new(path)?;
let schema = p.schema().as_ref().to_owned();
Ok(Self::from(
ctx_state,
LogicalPlan::ParquetScan {
path: path.to_owned(),
schema: Box::new(schema.clone()),
projection: None,
projected_schema: Box::new(schema),
},
))
}
/// Apply a projection
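    ///
    /// `Expr::Wildcard` entries are expanded into one `Expr::Column` per
    /// field of the input schema.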
pub fn project(&self, expr: Vec<Expr>) -> Result<DataFrame> {
let input_schema = self.plan.schema();
let projected_expr = if expr.contains(&Expr::Wildcard) {
let mut expr_vec = vec![];
            for e in &expr {
                match e {
                    Expr::Wildcard => {
                        input_schema
                            .fields()
                            .iter()
                            .for_each(|f| expr_vec.push(Expr::Column(f.name().clone())));
                    }
                    _ => expr_vec.push(e.clone()),
                }
            }
expr_vec
} else {
expr
};
let schema = Schema::new(exprlist_to_fields(&projected_expr, input_schema)?);
let df = Self::from(
self.ctx_state.clone(),
LogicalPlan::Projection {
expr: projected_expr,
input: Box::new(self.plan.clone()),
schema: Box::new(schema),
},
);
Ok(df)
}
/// Apply a filter
pub fn filter(&self, expr: Expr) -> Result<DataFrame> {
Ok(Self::from(
self.ctx_state.clone(),
LogicalPlan::Selection {
expr,
input: Box::new(self.plan.clone()),
},
))
}
/// Apply a sort
pub fn sort(&self, expr: Vec<Expr>) -> Result<DataFrame> {
Ok(Self::from(
self.ctx_state.clone(),
LogicalPlan::Sort {
expr,
input: Box::new(self.plan.clone()),
schema: self.plan.schema().clone(),
},
))
}
/// Apply a limit
pub fn limit(&self, n: usize) -> Result<DataFrame> {
Ok(Self::from(
self.ctx_state.clone(),
LogicalPlan::Limit {
n,
input: Box::new(self.plan.clone()),
schema: self.plan.schema().clone(),
},
))
}
/// Apply an aggregate
pub fn aggregate(&self, group_expr: Vec<Expr>, aggr_expr: Vec<Expr>) -> Result<DataFrame> {
let mut all_fields: Vec<Expr> = group_expr.clone();
aggr_expr.iter().for_each(|x| all_fields.push(x.clone()));
let aggr_schema = Schema::new(exprlist_to_fields(&all_fields, self.plan.schema())?);
Ok(Self::from(
self.ctx_state.clone(),
LogicalPlan::Aggregate {
input: Box::new(self.plan.clone()),
group_expr,
aggr_expr,
schema: Box::new(aggr_schema),
},
))
}
pub fn explain(&self) {
println!("{:?}", self.plan);
}
pub async fn collect(&self) -> Result<Vec<RecordBatch>> {
match &self.ctx_state.backend {
ContextBackend::Spark { spark_settings, .. } => {
let host = &spark_settings["spark.ballista.host"];
let port = &spark_settings["spark.ballista.port"];
let action = Action::InteractiveQuery {
plan: self.plan.clone(),
settings: spark_settings.clone(),
};
Context::from(self.ctx_state.clone())
.execute_action(host, port.parse::<usize>().unwrap(), action)
.await
}
ContextBackend::Remote {
host,
port,
settings,
} => {
let configs = BallistaConfigs::new();
let settings = configs.validate(settings)?;
let action = Action::InteractiveQuery {
plan: self.plan.clone(),
settings,
};
Context::from(self.ctx_state.clone())
.execute_action(host, *port, action)
.await
}
}
}
#[allow(clippy::match_single_binding)]
pub fn write_csv(&self, _path: &str) -> Result<()> {
match &self.ctx_state.backend {
other => Err(BallistaError::NotImplemented(format!(
"write_csv() is not implemented for {:?} yet",
other
))),
}
}
#[allow(clippy::match_single_binding)]
pub fn write_parquet(&self, _path: &str) -> Result<()> {
match &self.ctx_state.backend {
other => Err(BallistaError::NotImplemented(format!(
"write_parquet() is not implemented for {:?} yet",
other
))),
}
}
pub fn schema(&self) -> &Schema {
self.plan.schema()
}
pub fn logical_plan(&self) -> &LogicalPlan {
&self.plan
}
}
pub fn min(expr: Expr) -> Expr {
aggregate_expr("MIN", &expr)
}
pub fn max(expr: Expr) -> Expr {
aggregate_expr("MAX", &expr)
}
pub fn sum(expr: Expr) -> Expr {
aggregate_expr("SUM", &expr)
}
pub fn avg(expr: Expr) -> Expr {
aggregate_expr("AVG", &expr)
}
pub fn count(expr: Expr) -> Expr {
aggregate_expr("COUNT", &expr)
}
/// Create a column expression based on a column name
pub fn col(name: &str) -> Expr {
Expr::Column(name.to_owned())
}
pub fn alias(expr: &Expr, name: &str) -> Expr {
Expr::Alias(Box::new(expr.to_owned()), name.to_owned())
}
pub fn add(l: &Expr, r: &Expr) -> Expr {
binary_expr(l, Operator::Plus, r)
}
pub fn subtract(l: &Expr, r: &Expr) -> Expr {
binary_expr(l, Operator::Minus, r)
}
pub fn mult(l: &Expr, r: &Expr) -> Expr {
binary_expr(l, Operator::Multiply, r)
}
pub fn div(l: &Expr, r: &Expr) -> Expr {
binary_expr(l, Operator::Divide, r)
}
fn binary_expr(l: &Expr, op: Operator, r: &Expr) -> Expr {
Expr::BinaryExpr {
left: Box::new(l.to_owned()),
op,
right: Box::new(r.to_owned()),
}
}
/// Create a literal string expression
pub fn lit_str(str: &str) -> Expr {
Expr::Literal(ScalarValue::Utf8(str.to_owned()))
}
/// Create a literal i8 expression
pub fn lit_i8(n: i8) -> Expr {
Expr::Literal(ScalarValue::Int8(n))
}
/// Create a literal i16 expression
pub fn lit_i16(n: i16) -> Expr {
Expr::Literal(ScalarValue::Int16(n))
}
/// Create a literal i32 expression
pub fn lit_i32(n: i32) -> Expr {
Expr::Literal(ScalarValue::Int32(n))
}
/// Create a literal i64 expression
pub fn lit_i64(n: i64) -> Expr {
Expr::Literal(ScalarValue::Int64(n))
}
/// Create a literal u8 expression
pub fn lit_u8(n: u8) -> Expr {
Expr::Literal(ScalarValue::UInt8(n))
}
/// Create a literal u16 expression
pub fn lit_u16(n: u16) -> Expr {
Expr::Literal(ScalarValue::UInt16(n))
}
/// Create a literal u32 expression
pub fn lit_u32(n: u32) -> Expr {
Expr::Literal(ScalarValue::UInt32(n))
}
/// Create a literal u64 expression
pub fn lit_u64(n: u64) -> Expr {
Expr::Literal(ScalarValue::UInt64(n))
}
/// Create a literal f32 expression
pub fn lit_f32(n: f32) -> Expr {
Expr::Literal(ScalarValue::Float32(n))
}
/// Create a literal f64 expression
pub fn lit_f64(n: f64) -> Expr {
Expr::Literal(ScalarValue::Float64(n))
}
/// Create an expression to represent a named aggregate function
pub fn aggregate_expr(name: &str, expr: &Expr) -> Expr {
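    // NOTE: the return type is currently hard-coded to Float64 for every
    // aggregate function, regardless of `name` or the argument type.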
let return_type = DataType::Float64;
Expr::AggregateFunction {
name: name.to_string(),
args: vec![expr.clone()],
return_type,
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn empty_settings() -> Result<()> {
let my_settings = HashMap::new();
let configs = BallistaConfigs::new();
let my_settings = configs.validate(&my_settings)?;
assert_eq!(my_settings[PARQUET_READER_BATCH_SIZE], "65536");
Ok(())
}
#[test]
fn custom_setting() -> Result<()> {
let mut my_settings: HashMap<String, String> = HashMap::new();
my_settings.insert(PARQUET_READER_BATCH_SIZE.to_owned(), "1234".to_owned());
let configs = BallistaConfigs::new();
let my_settings = configs.validate(&my_settings)?;
assert_eq!(my_settings[PARQUET_READER_BATCH_SIZE], "1234");
Ok(())
}
#[test]
fn invalid_setting() -> Result<()> {
let mut my_settings: HashMap<String, String> = HashMap::new();
my_settings.insert(
PARQUET_READER_BATCH_SIZE.to_owned(),
"twenty gigs".to_owned(),
);
let configs = BallistaConfigs::new();
match configs.validate(&my_settings) {
Err(e) => assert_eq!("General error: Error parsing value twenty gigs for setting 'ballista.parquet.reader.batchSize' ParseIntError { kind: InvalidDigit }", e.to_string()),
_ => return Err(ballista_error("validation failed"))
}
Ok(())
}
}
| 29.56044 | 183 | 0.557302 |
28ffcd8941eee46144e1bb32fc00efce604d1ea1 | 30,405 | #[macro_use]
extern crate pretty_assertions;
use diesel::connection::Connection;
use diesel::pg::PgConnection;
use std::convert::TryInto;
use std::sync::Mutex;
use std::thread;
use std::time::Duration;
use graph::mock::*;
use graph::prelude::*;
use graph_chain_ethereum::network_indexer::{
self as network_indexer, BlockWithOmmers, NetworkIndexerEvent,
};
use graph_core::MetricsRegistry;
use graph_store_postgres::Store as DieselStore;
use web3::types::{H256, H64};
use test_store::*;
// Helper macros to define indexer events.
macro_rules! add_block {
($chain:expr, $n:expr) => {{
NetworkIndexerEvent::AddBlock($chain[$n].inner().into())
}};
}
macro_rules! revert {
($from_chain:expr, $from_n:expr => $to_chain:expr, $to_n:expr) => {{
NetworkIndexerEvent::Revert {
from: $from_chain[$from_n].inner().into(),
to: $to_chain[$to_n].inner().into(),
}
}};
}
// Helper to wipe the store clean.
fn remove_test_data(store: Arc<DieselStore>) {
let url = postgres_test_url();
let conn = PgConnection::establish(url.as_str()).expect("Failed to connect to Postgres");
graph_store_postgres::store::delete_all_entities_for_test_use_only(&store, &conn)
.expect("Failed to remove entity test data");
}
// Helper to run network indexer against test chains.
fn run_network_indexer(
store: Arc<DieselStore>,
start_block: Option<EthereumBlockPointer>,
chains: Vec<Vec<BlockWithOmmers>>,
timeout: Duration,
) -> impl Future<
Item = (
Arc<Mutex<Chains>>,
impl Future<Item = Vec<NetworkIndexerEvent>, Error = ()>,
),
Error = (),
> {
// Simulate an Ethereum network using a mock adapter
let (adapter, chains) = create_mock_ethereum_adapter(chains);
let subgraph_name = SubgraphName::new("ethereum/testnet").unwrap();
let logger = LOGGER.clone();
let prometheus_registry = Arc::new(Registry::new());
let metrics_registry = Arc::new(MetricsRegistry::new(logger.clone(), prometheus_registry));
// Create the network indexer
let mut indexer = network_indexer::NetworkIndexer::new(
&logger,
adapter,
store.clone(),
metrics_registry,
subgraph_name.to_string(),
start_block,
);
let (event_sink, event_stream) = futures::sync::mpsc::channel(100);
// Run network indexer and forward its events to the channel
graph::spawn(
indexer
.take_event_stream()
.expect("failed to take stream from indexer")
.map_err(|_| ())
.forward(event_sink.sink_map_err(|_| ()))
.map(|_| ())
.timeout(timeout),
);
future::ok((chains, event_stream.collect()))
}
// Helper to run tests against a clean store.
fn run_test<R, F>(test: F)
where
F: FnOnce(Arc<DieselStore>) -> R + Send + 'static,
R: IntoFuture<Item = ()> + Send + 'static,
R::Error: Send + Debug,
R::Future: Send,
{
let store = STORE.clone();
// Lock regardless of poisoning. This also forces sequential test execution.
let mut runtime = match STORE_RUNTIME.lock() {
Ok(guard) => guard,
Err(err) => err.into_inner(),
};
runtime
.block_on(
future::lazy(move || {
// Reset store before running
remove_test_data(store.clone());
// Run test
test(store.clone())
})
.compat(),
)
.expect("failed to run test with clean store");
}
// Helper to create a sequence of linked blocks.
fn create_chain(n: u64, parent: Option<&BlockWithOmmers>) -> Vec<BlockWithOmmers> {
let start = parent.map_or(0, |block| block.inner().number.unwrap().as_u64() + 1);
(start..start + n).fold(vec![], |mut blocks, number| {
let mut block = BlockWithOmmers::default();
// Set required fields
block.block.block.nonce = Some(H64::random());
block.block.block.mix_hash = Some(H256::random());
// Use the index as the block number
block.block.block.number = Some(number.into());
// Use a random hash as the block hash (should be unique)
block.block.block.hash = Some(H256::random());
if number == start {
// Set the parent hash for the first block only if a
// parent was passed in; otherwise we're dealing with
// the genesis block
if let Some(parent_block) = parent {
block.block.block.parent_hash = parent_block.inner().hash.unwrap().clone();
}
} else {
// Set the parent hash for all blocks but the genesis block
block.block.block.parent_hash =
blocks.last().unwrap().block.block.hash.clone().unwrap();
}
blocks.push(block);
blocks
})
}
fn create_fork(
original_blocks: Vec<BlockWithOmmers>,
base: u64,
total: u64,
) -> Vec<BlockWithOmmers> {
let mut blocks = original_blocks[0..(base as usize) + 1].to_vec();
let new_blocks = create_chain((total - base - 1).try_into().unwrap(), blocks.last());
blocks.extend(new_blocks);
blocks
}
struct Chains {
current_chain_index: usize,
chains: Vec<Vec<BlockWithOmmers>>,
}
impl Chains {
pub fn new(chains: Vec<Vec<BlockWithOmmers>>) -> Self {
Self {
current_chain_index: 0,
chains,
}
}
pub fn index(&self) -> usize {
self.current_chain_index
}
pub fn current_chain(&self) -> Option<&Vec<BlockWithOmmers>> {
self.chains.get(self.current_chain_index)
}
pub fn advance_to_next_chain(&mut self) {
self.current_chain_index += 1;
}
}
fn create_mock_ethereum_adapter(
chains: Vec<Vec<BlockWithOmmers>>,
) -> (Arc<MockEthereumAdapter>, Arc<Mutex<Chains>>) {
let chains = Arc::new(Mutex::new(Chains::new(chains)));
// Create the mock Ethereum adapter.
let mut adapter = MockEthereumAdapter::new();
    // Polling the latest remote head returns the tip of whichever chain
    // version is currently active (tests advance the version externally)
let chains_for_latest_block = chains.clone();
adapter.expect_latest_block().returning(move |_: &Logger| {
let chains = chains_for_latest_block.lock().unwrap();
Box::new(future::result(
chains
.current_chain()
.ok_or_else(|| {
format_err!("exhausted chain versions used in this test; this is ok")
})
.and_then(|chain| chain.last().ok_or_else(|| format_err!("empty block chain")))
.map_err(Into::into)
.map(|block| block.block.block.clone()),
))
});
let chains_for_block_by_number = chains.clone();
adapter
.expect_block_by_number()
.returning(move |_, number: u64| {
let chains = chains_for_block_by_number.lock().unwrap();
Box::new(future::result(
chains
.current_chain()
.ok_or_else(|| format_err!("unknown chain {:?}", chains.index()))
.map(|chain| {
chain
.iter()
.find(|block| block.inner().number.unwrap().as_u64() == number)
.map(|block| block.clone().block.block)
}),
))
});
let chains_for_block_by_hash = chains.clone();
adapter
.expect_block_by_hash()
.returning(move |_, hash: H256| {
let chains = chains_for_block_by_hash.lock().unwrap();
Box::new(future::result(
chains
.current_chain()
.ok_or_else(|| format_err!("unknown chain {:?}", chains.index()))
.map(|chain| {
chain
.iter()
.find(|block| block.inner().hash.unwrap() == hash)
.map(|block| block.clone().block.block)
}),
))
});
let chains_for_load_full_block = chains.clone();
adapter
.expect_load_full_block()
.returning(move |_, block: LightEthereumBlock| {
let chains = chains_for_load_full_block.lock().unwrap();
Box::new(future::result(
chains
.current_chain()
.ok_or_else(|| format_err!("unknown chain {:?}", chains.index()))
.map_err(Into::into)
.map(|chain| {
chain
.iter()
.find(|b| b.inner().number.unwrap() == block.number.unwrap())
.expect(
format!(
"full block {} [{:x}] not found",
block.number.unwrap(),
block.hash.unwrap()
)
.as_str(),
)
.clone()
.block
}),
))
});
// For now return no ommers
let chains_for_ommers = chains.clone();
adapter
.expect_uncles()
.returning(move |_, block: &LightEthereumBlock| {
let chains = chains_for_ommers.lock().unwrap();
Box::new(future::result(
chains
.current_chain()
.ok_or_else(|| format_err!("unknown chain {:?}", chains.index()))
.map_err(Into::into)
.map(|chain| {
chain
.iter()
.find(|b| b.inner().hash.unwrap() == block.hash.unwrap())
.expect(
format!(
"block #{} ({:x}) not found",
block.number.unwrap(),
block.hash.unwrap()
)
.as_str(),
)
.clone()
.ommers
.into_iter()
.map(|ommer| Some((*ommer).clone()))
.collect::<Vec<_>>()
}),
))
});
(Arc::new(adapter), chains)
}
// GIVEN a fresh subgraph (local head = none)
// AND a chain with 10 blocks
// WHEN indexing the network
// EXPECT 10 `AddBlock` events are emitted, one for each block
#[test]
fn indexing_starts_at_genesis() {
run_test(|store: Arc<DieselStore>| {
// Create test chain
let chain = create_chain(10, None);
let chains = vec![chain.clone()];
// Run network indexer and collect its events
run_network_indexer(store, None, chains, Duration::from_secs(1)).and_then(
move |(_, events)| {
events.and_then(move |events| {
                    // Assert that the indexer emits one `AddBlock` event per
                    // block, starting at genesis
assert_eq!(
events,
(0..10).map(|n| add_block!(chain, n)).collect::<Vec<_>>()
);
Ok(())
})
},
)
});
}
// GIVEN an existing subgraph (local head = block #2)
// AND a chain with 10 blocks
// WHEN indexing the network
// EXPECT 7 `AddBlock` events are emitted, one for each remaining block
#[test]
fn indexing_resumes_from_local_head() {
run_test(|store: Arc<DieselStore>| {
// Create test chain
let chain = create_chain(10, None);
let chains = vec![chain.clone()];
// Run network indexer and collect its events
run_network_indexer(
store,
Some(chain[2].inner().into()),
chains,
Duration::from_secs(1),
)
.and_then(move |(_, events)| {
events.and_then(move |events| {
// Assert that the events emitted by the indexer are only
// for the blocks #3-#9.
assert_eq!(
events,
(3..10).map(|n| add_block!(chain, n)).collect::<Vec<_>>()
);
Ok(())
})
})
});
}
// GIVEN a fresh subgraph (local head = none)
// AND a chain that grows from 10 to 20 and then to 50 blocks
// WHEN indexing the network
// EXPECT 50 `AddBlock` events are emitted, one for each block
#[test]
fn indexing_picks_up_new_remote_head() {
run_test(|store: Arc<DieselStore>| {
// The first time we pull the remote head, there are 10 blocks
let chain_10 = create_chain(10, None);
// The second time we pull the remote head, there are 20 blocks;
// the first 10 blocks are identical to before, so this simulates
// 10 new blocks being added to the same chain
let chain_20 = create_fork(chain_10.clone(), 9, 20);
// The third time we pull the remote head, there are 50 blocks;
// the first 20 blocks are identical to before
let chain_50 = create_fork(chain_20.clone(), 19, 50);
// Use the three chains above in the test
let chains = vec![chain_10.clone(), chain_20.clone(), chain_50.clone()];
// Run network indexer and collect its events
run_network_indexer(store, None, chains, Duration::from_secs(4)).and_then(
move |(chains, events)| {
thread::spawn(move || {
// Create the first chain update after 1s
{
thread::sleep(Duration::from_secs(1));
chains.lock().unwrap().advance_to_next_chain();
}
// Create the second chain update after 3s
{
thread::sleep(Duration::from_secs(2));
chains.lock().unwrap().advance_to_next_chain();
}
});
events.and_then(move |events| {
// Assert that the events emitted by the indexer match the blocks 1:1,
// despite them requiring two remote head updates
assert_eq!(
events,
(0..50).map(|n| add_block!(chain_50, n)).collect::<Vec<_>>(),
);
Ok(())
})
},
)
});
}
// GIVEN a fresh subgraph (local head = none)
// AND a chain with 10 blocks with a gap (#5 missing)
// WHEN indexing the network
// EXPECT only `AddBlock` events for blocks #0-#4 are emitted
#[test]
fn indexing_does_not_move_past_a_gap() {
run_test(|store: Arc<DieselStore>| {
// Create test chain
let mut blocks = create_chain(10, None);
// Remove block #5 (the element at index 5)
blocks.remove(5);
let chains = vec![blocks.clone()];
// Run network indexer and collect its events
run_network_indexer(store, None, chains, Duration::from_secs(1)).and_then(
move |(_, events)| {
events.and_then(move |events| {
// Assert that only blocks #0 - #4 were indexed and nothing more
assert_eq!(
events,
(0..5).map(|n| add_block!(blocks, n)).collect::<Vec<_>>()
);
Ok(())
})
},
)
});
}
// GIVEN a fresh subgraph (local head = none)
// AND 10 blocks for one version of the chain
// AND 11 blocks for a fork of the chain that starts after block #8
// WHEN indexing the network
// EXPECT 10 `AddBlock` events are emitted for the first branch,
// 1 `Revert` event is emitted to revert back to block #8
// 2 `AddBlock` events are emitted for blocks #9-#10 of the fork
#[test]
fn indexing_handles_single_block_reorg() {
run_test(|store: Arc<DieselStore>| {
// Create the initial chain
let initial_chain = create_chain(10, None);
// Create a forked chain after block #8
let forked_chain = create_fork(initial_chain.clone(), 8, 11);
// Run the network indexer and collect its events
let chains = vec![initial_chain.clone(), forked_chain.clone()];
run_network_indexer(store, None, chains, Duration::from_secs(2)).and_then(
move |(chains, events)| {
// Trigger the reorg after 1s
thread::spawn(move || {
thread::sleep(Duration::from_secs(1));
chains.lock().unwrap().advance_to_next_chain();
});
events.and_then(move |events| {
assert_eq!(
events,
// The 10 `AddBlock` events for the initial version of the chain
(0..10)
.map(|n| add_block!(initial_chain, n))
// The 1 `Revert` event to go back to #8
.chain(vec![revert!(initial_chain, 9 => initial_chain, 8)])
// The 2 `AddBlock` events for the new chain
.chain((9..11).map(|n| add_block!(forked_chain, n)))
.collect::<Vec<_>>()
);
Ok(())
})
},
)
});
}
// GIVEN a fresh subgraph (local head = none)
// AND 10 blocks for one version of the chain
// AND 20 blocks for a fork of the chain that starts after block #2
// WHEN indexing the network
// EXPECT 10 `AddBlock` events are emitted for the first branch,
// 7 `Revert` events are emitted to revert back to block #2
// 17 `AddBlock` events are emitted for blocks #3-#19 of the fork
#[test]
fn indexing_handles_simple_reorg() {
run_test(|store: Arc<DieselStore>| {
// Create the initial chain
let initial_chain = create_chain(10, None);
// Create a forked chain after block #2
let forked_chain = create_fork(initial_chain.clone(), 2, 20);
// Run the network indexer and collect its events
let chains = vec![initial_chain.clone(), forked_chain.clone()];
run_network_indexer(store, None, chains, Duration::from_secs(2)).and_then(
move |(chains, events)| {
// Trigger a reorg after 1s
thread::spawn(move || {
thread::sleep(Duration::from_secs(1));
chains.lock().unwrap().advance_to_next_chain();
});
events.and_then(move |events| {
assert_eq!(
events,
// - 10 `AddBlock` events for blocks #0 to #9 of the initial chain
(0..10)
.map(|n| add_block!(initial_chain, n))
// - 7 `Revert` events from #9 to #8, ..., #3 to #2 (the fork base)
.chain(
vec![9, 8, 7, 6, 5, 4, 3]
.into_iter()
.map(|n| revert!(initial_chain, n => initial_chain, n-1))
)
// 17 `AddBlock` events for the new chain
.chain((3..20).map(|n| add_block!(forked_chain, n)))
.collect::<Vec<_>>()
);
Ok(())
})
},
)
});
}
// GIVEN a fresh subgraph (local head = none)
// AND 10 blocks for the initial chain
// AND 20 blocks for a fork of the initial chain that starts after block #2
// AND 30 blocks for a fork of the initial chain that starts after block #2
// WHEN indexing the network
// EXPECT 10 `AddBlock` events are emitted for the first branch,
// 7 `Revert` events are emitted to revert back to block #2
// 17 `AddBlock` events are emitted for blocks #3-#19 of the second fork
// 17 `Revert` events are emitted to revert back to block #2
// 27 `AddBlock` events are emitted for blocks #3-#29 of the third fork
#[test]
fn indexing_handles_consecutive_reorgs() {
run_test(|store: Arc<DieselStore>| {
// Create the initial chain
let initial_chain = create_chain(10, None);
// Create a forked chain after block #2
let second_chain = create_fork(initial_chain.clone(), 2, 20);
// Create a second forked chain, also after block #2
let third_chain = create_fork(initial_chain.clone(), 2, 30);
// Run the network indexer for 6s and collect its events
let chains = vec![
initial_chain.clone(),
second_chain.clone(),
third_chain.clone(),
];
run_network_indexer(store, None, chains, Duration::from_secs(6)).and_then(
move |(chains, events)| {
thread::spawn(move || {
// Trigger the first reorg after 2s
{
thread::sleep(Duration::from_secs(2));
chains.lock().unwrap().advance_to_next_chain();
}
// Trigger the second reorg after 4s
{
thread::sleep(Duration::from_secs(2));
chains.lock().unwrap().advance_to_next_chain();
}
});
events.and_then(move |events| {
assert_eq!(
events,
// The 10 add block events for the initial version of the chain
(0..10)
.map(|n| add_block!(initial_chain, n))
// The 7 revert events to go back to #2
.chain(
vec![9, 8, 7, 6, 5, 4, 3]
.into_iter()
.map(|n| revert!(initial_chain, n => initial_chain, n-1))
)
// The 17 add block events for the new chain
.chain((3..20).map(|n| add_block!(second_chain, n)))
// The 17 revert events to go back to #2
.chain(
vec![19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3]
.into_iter()
.map(|n| revert!(second_chain, n => second_chain, n-1))
)
// The 27 add block events for the third chain
.chain((3..30).map(|n| add_block!(third_chain, n)))
.collect::<Vec<_>>()
);
Ok(())
})
},
)
});
}
// GIVEN a fresh subgraph (local head = none)
// AND 5 blocks for one version of the chain (#0 - #4)
// AND a fork with blocks #0 - #3, #4', #5'
// AND a fork with blocks #0 - #3, #4, #5'', #6''
// WHEN indexing the network
// EXPECT 5 `AddBlock` events are emitted for the first chain version,
// 1 `Revert` event is emitted from block #4 to #3
// 2 `AddBlock` events are emitted for blocks #4', #5'
// 2 `Revert` events are emitted from block #5' to #4' and #4' to #3
// 3 `AddBlock` events are emitted for blocks #4, #5'', #6''
#[test]
fn indexing_handles_reorg_back_and_forth() {
run_test(|store: Arc<DieselStore>| {
// Create the initial chain (blocks #0 - #4)
let initial_chain = create_chain(5, None);
// Create fork 1 (blocks #0 - #3, #4', #5')
let fork1 = create_fork(initial_chain.clone(), 3, 6);
// Create fork 2 (blocks #0 - #4, #5'', #6'');
// this fork includes the original #4 again, which at this point should
// no longer be in the store and therefore not be considered as the
// common ancestor of the fork (that should be #3).
let fork2 = create_fork(initial_chain.clone(), 4, 7);
// Run the network indexer and collect its events
let chains = vec![initial_chain.clone(), fork1.clone(), fork2.clone()];
run_network_indexer(store, None, chains, Duration::from_secs(3)).and_then(
move |(chains, events)| {
thread::spawn(move || {
// Trigger the first reorg after 1s
{
thread::sleep(Duration::from_secs(1));
chains.lock().unwrap().advance_to_next_chain();
}
// Trigger the second reorg after 2s
{
thread::sleep(Duration::from_secs(1));
chains.lock().unwrap().advance_to_next_chain();
}
});
events.and_then(move |events| {
assert_eq!(
events,
vec![
add_block!(initial_chain, 0),
add_block!(initial_chain, 1),
add_block!(initial_chain, 2),
add_block!(initial_chain, 3),
add_block!(initial_chain, 4),
revert!(initial_chain, 4 => initial_chain, 3),
add_block!(fork1, 4),
add_block!(fork1, 5),
revert!(fork1, 5 => fork1, 4),
revert!(fork1, 4 => initial_chain, 3),
add_block!(fork2, 4),
add_block!(fork2, 5),
add_block!(fork2, 6)
]
);
Ok(())
})
},
)
});
}
// Test that ommer blocks are not confused with regular blocks when finding
// common ancestors for reorgs. There was a bug initially where that would
// happen, because any block that was in the store was considered to be on the
// local version of the chain. This assumption is false because ommers are
// stored as `Block` entities as well. To correctly identify the common
// ancestor in a reorg, traversing the old and new chains block by block
// through parent hashes is necessary.
//
// GIVEN a fresh subgraph (local head = none)
// AND 5 blocks for one version of the chain (#0 - #4)
// AND a fork with blocks #0 - #3, #4', #5'
// where block #5' has #4 as an ommer
// AND a fork with blocks #0 - #3, #4, #5'', #6''
// where the original #4 is included again
// WHEN indexing the network
// EXPECT 5 `AddBlock` events are emitted for the first chain version,
// 1 `Revert` event is emitted from block #4 to #3
// 2 `AddBlock` events are emitted for blocks #4', #5'
// 2 `Revert` events are emitted from block #5' to #4' and #4' to #3
// 3 `AddBlock` events are emitted for blocks #4, #5'', #6''
// block #3 is identified as the common ancestor in both reorgs
#[test]
fn indexing_identifies_common_ancestor_correctly_despite_ommers() {
run_test(|store: Arc<DieselStore>| {
// Create the initial chain (#0 - #4)
let initial_chain = create_chain(5, None);
// Create fork 1 (blocks #0 - #3, #4', #5')
let mut fork1 = create_fork(initial_chain.clone(), 3, 6);
// Make it so that #5' has #4 as an uncle
fork1[5].block.block.uncles = vec![initial_chain[4].inner().hash.clone().unwrap()];
fork1[5].ommers = vec![initial_chain[4].block.block.clone().into()];
// Create fork 2 (blocks #0 - #4, #5'', #6''); this fork includes the
// original #4 again, which at this point should no longer be part of
// the indexed chain in the store and therefore not be considered as the
// common ancestor of the fork (that should be #3). It is still in the
// store as an ommer (of #5', from fork1) but that ommer should not be
// picked as the common ancestor either.
let fork2 = create_fork(initial_chain.clone(), 4, 7);
// Run the network indexer and collect its events
let chains = vec![initial_chain.clone(), fork1.clone(), fork2.clone()];
run_network_indexer(store, None, chains, Duration::from_secs(3)).and_then(
move |(chains, events)| {
thread::spawn(move || {
// Trigger the first reorg after 1s
{
thread::sleep(Duration::from_secs(1));
chains.lock().unwrap().advance_to_next_chain();
}
// Trigger the second reorg after 2s
{
thread::sleep(Duration::from_secs(1));
chains.lock().unwrap().advance_to_next_chain();
}
});
events.and_then(move |events| {
assert_eq!(
events,
vec![
add_block!(initial_chain, 0),
add_block!(initial_chain, 1),
add_block!(initial_chain, 2),
add_block!(initial_chain, 3),
add_block!(initial_chain, 4),
revert!(initial_chain, 4 => initial_chain, 3),
add_block!(fork1, 4),
add_block!(fork1, 5),
revert!(fork1, 5 => fork1, 4),
revert!(fork1, 4 => initial_chain, 3),
add_block!(fork2, 4),
add_block!(fork2, 5),
add_block!(fork2, 6)
]
);
Ok(())
})
},
)
});
}
| 38.585025 | 97 | 0.505772 |
ff797b68e253ca736c172bdf511fd338326fc396 | 2,165 | use crate::client::oracle_types::{OracleParameters, OraclePreVotes, OracleVotes};
use crate::{LCDResult, Terra};
pub struct Oracle<'a> {
terra: &'a Terra,
}
impl<'a> Oracle<'a> {
pub fn create(terra: &'a Terra) -> Oracle<'a> {
Oracle { terra }
}
pub async fn parameters(&self) -> anyhow::Result<LCDResult<OracleParameters>> {
let response = self
.terra
.send_cmd::<LCDResult<OracleParameters>>("/oracle/parameters", None)
.await?;
Ok(response)
}
pub fn voters(&self, validator: &'a str) -> Voters<'a> {
Voters::create(self.terra, validator)
}
}
pub struct Voters<'a> {
terra: &'a Terra,
pub validator: &'a str,
}
impl<'a> Voters<'a> {
pub fn create(terra: &'a Terra, validator: &'a str) -> Voters<'a> {
Voters { terra, validator }
}
pub async fn votes(&self) -> anyhow::Result<LCDResult<Vec<OracleVotes>>> {
let response = self
.terra
.send_cmd::<LCDResult<Vec<OracleVotes>>>(
&format!("/oracle/voters/{}/votes", &self.validator),
None,
)
.await?;
Ok(response)
}
pub async fn prevotes(&self) -> anyhow::Result<LCDResult<Vec<OraclePreVotes>>> {
let response = self
.terra
.send_cmd::<LCDResult<Vec<OraclePreVotes>>>(
&format!("/oracle/voters/{}/prevotes", &self.validator),
None,
)
.await?;
Ok(response)
}
pub async fn feeder(&self) -> anyhow::Result<LCDResult<String>> {
let response = self
.terra
.send_cmd::<LCDResult<String>>(
&format!("/oracle/voters/{}/feeder", &self.validator),
None,
)
.await?;
Ok(response)
}
pub async fn miss(&self) -> anyhow::Result<LCDResult<String>> {
let response = self
.terra
.send_cmd::<LCDResult<String>>(
&format!("/oracle/voters/{}/miss", &self.validator),
None,
)
.await?;
Ok(response)
}
}
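// Illustrative usage sketch (not part of the original file; the `Terra`
// client construction and the validator address below are assumptions):
//
// let terra = /* construct a `Terra` LCD client for the target chain */;
// let feeder = terra.oracle().voters("terravaloper1...").feeder().await?;
// let misses = terra.oracle().voters("terravaloper1...").miss().await?;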
| 30.069444 | 84 | 0.516859 |
ef24ff9847174d9501d6ed9bf0da7f4632668c3d | 3,689 | /*!
Super Rotation System, or SRS for rotating tetrominoes.
Based on https://tetris.wiki/SRS
*/
use ::{Point, Piece, Rot, Well, Player};
/// SRS offset data.
///
/// When the player desires to rotate the piece, this table is consulted for wall kicks.
pub struct SrsData {
cw: [[Point; 5]; 4],
ccw: [[Point; 5]; 4],
}
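// How the tables are consulted: `cw[r]` (or `ccw[r]`) holds the five candidate
// offsets to try, in order, when rotating clockwise (or counter-clockwise)
// *from* rotation state `r`; see `srs_data_cw`/`srs_data_ccw` below, which
// index by `rot as u8`. The first offset at which the rotated sprite fits
// wins; if all five collide, the rotation is rejected and `srs_cw`/`srs_ccw`
// return the player unchanged.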
macro_rules! pt {
(($x:expr, $y:expr)) => { Point { x: $x, y: $y } };
}
macro_rules! srs {
(
$a:tt $b:tt $c:tt $d:tt $e:tt
$f:tt $g:tt $h:tt $i:tt $j:tt
$k:tt $l:tt $m:tt $n:tt $o:tt
$p:tt $q:tt $r:tt $s:tt $t:tt
) => {
[[pt!($a), pt!($b), pt!($c), pt!($d), pt!($e)],
[pt!($f), pt!($g), pt!($h), pt!($i), pt!($j)],
[pt!($k), pt!($l), pt!($m), pt!($n), pt!($o)],
[pt!($p), pt!($q), pt!($r), pt!($s), pt!($t)]]
}
}
/// SRS offsets for all but the I piece.
pub static SRS_DATA_JLSTZ: SrsData = SrsData {
cw: srs! {
( 0, 0) (-1, 0) (-1, 1) ( 0,-2) (-1,-2)
( 0, 0) ( 1, 0) ( 1,-1) ( 0, 2) ( 1, 2)
( 0, 0) ( 1, 0) ( 1, 1) ( 0,-2) ( 1,-2)
( 0, 0) (-1, 0) (-1,-1) ( 0, 2) (-1, 2)
},
ccw: srs! {
( 0, 0) ( 1, 0) ( 1, 1) ( 0,-2) ( 1,-2)
( 0, 0) (-1, 0) (-1,-1) ( 0, 2) (-1, 2)
( 0, 0) (-1, 0) (-1, 1) ( 0,-2) (-1,-2)
( 0, 0) ( 1, 0) ( 1,-1) ( 0, 2) ( 1, 2)
},
};
/// SRS offsets for the I piece.
pub static SRS_DATA_I: SrsData = SrsData {
cw: srs! {
( 0, 0) (-2, 0) ( 1, 0) (-2,-1) ( 1, 2)
( 0, 0) (-1, 0) ( 2, 0) (-1, 2) ( 2,-1)
( 0, 0) ( 2, 0) (-1, 0) ( 2, 1) (-1,-2)
( 0, 0) ( 1, 0) (-2, 0) ( 1,-2) (-2, 1)
},
ccw: srs! {
( 0, 0) (-1, 0) ( 2, 0) (-1, 2) ( 2,-1)
( 0, 0) (-2, 0) ( 1, 0) (-2,-1) ( 1, 2)
( 0, 0) ( 1, 0) (-2, 0) ( 1,-2) (-2, 1)
( 0, 0) ( 2, 0) (-1, 0) ( 2, 1) (-1,-2)
},
};
/*
/// SRS offsets for the I piece under Arika rules.
pub static SRS_DATA_ARIKA: SrsData = SrsData {
cw: srs! {
( 0, 0) (-2, 0) ( 1, 0) ( 1, 2) (-2,-1)
( 0, 0) (-1, 0) ( 2, 0) (-1, 2) ( 2,-1)
( 0, 0) ( 2, 0) (-1, 0) ( 2, 1) (-1,-1)
( 0, 0) (-2, 0) ( 1, 0) (-2, 1) ( 1,-2)
},
ccw: srs! {
( 0, 0) ( 2, 0) (-1, 0) (-1, 2) ( 2,-1)
( 0, 0) ( 1, 0) (-2, 0) ( 1, 2) (-2,-1)
( 0, 0) (-2, 0) ( 1, 0) (-2, 1) ( 1,-1)
( 0, 0) ( 2, 0) (-1, 0) ( 2, 1) (-1,-2)
},
};
*/
pub fn srs_data_cw(piece: Piece, rot: Rot) -> &'static [Point; 5] {
let src = if piece == Piece::I { &SRS_DATA_I } else { &SRS_DATA_JLSTZ };
&src.cw[rot as u8 as usize]
}
pub fn srs_data_ccw(piece: Piece, rot: Rot) -> &'static [Point; 5] {
let src = if piece == Piece::I { &SRS_DATA_I } else { &SRS_DATA_JLSTZ };
&src.ccw[rot as u8 as usize]
}
pub fn srs_cw(well: &Well, player: Player) -> Player {
let rotated = player.rotate_cw();
let sprite = rotated.sprite();
let kicks = srs_data_cw(player.piece, player.rot);
well.wall_kick(sprite, kicks, rotated.pt).map(|pt| Player::new(rotated.piece, rotated.rot, pt)).unwrap_or(player)
}
pub fn srs_ccw(well: &Well, player: Player) -> Player {
let rotated = player.rotate_ccw();
let sprite = rotated.sprite();
let kicks = srs_data_ccw(player.piece, player.rot);
well.wall_kick(sprite, kicks, rotated.pt).map(|pt| Player::new(rotated.piece, rotated.rot, pt)).unwrap_or(player)
}
#[cfg(test)]
mod tests {
use super::*;
use ::{Well, Player, Piece, Rot, Point};
#[test]
fn wall_kick_example() {
let well = Well::from_data(10, &[
0b0000000000,
0b0000110000,
0b0000011100,
0b0000001111,
0b0111000111,
0b1100001111,
0b1111001111,
0b1111101111,
]);
let initial = Player::new(Piece::J, Rot::Zero, Point::new(2, 5));
let player = srs_ccw(&well, initial);
let expected = Player::new(Piece::J, Rot::Left, Point::new(3, 3));
assert_eq!(expected, player);
}
}
| 28.376923 | 114 | 0.491732 |
dd50b0490ac8495d62df634001a211182faafef2 | 3,124 | use crate::{audio::Sounds, Ball, ScoreBoard};
use amethyst::{
assets::AssetStorage,
audio::{output::Output, Source},
core::Transform,
derive::SystemDesc,
ecs::{Entity, Join, Read, ReadExpect, System, SystemData, Write, WriteStorage},
ui::UiText,
};
/// This system is responsible for checking if a ball has moved into a left or
/// a right edge. Points are distributed to the player on the other side, and
/// the ball is reset.
#[derive(SystemDesc)]
pub struct WinnerSystem;
impl<'s> System<'s> for WinnerSystem {
type SystemData = (
WriteStorage<'s, Ball>,
WriteStorage<'s, Transform>,
WriteStorage<'s, UiText>,
Write<'s, ScoreBoard>,
Read<'s, AssetStorage<Source>>,
ReadExpect<'s, Sounds>,
ReadExpect<'s, ScoreText>,
Option<Read<'s, Output>>,
);
fn run(
&mut self,
(
mut balls,
mut transforms,
mut text,
mut score_board,
storage,
sounds,
score_text,
audio_output,
): Self::SystemData,
) {
for (ball, transform) in (&mut balls, &mut transforms).join() {
use crate::{ARENA_HEIGHT, ARENA_WIDTH};
let ball_x = transform.translation().x;
let did_hit = if ball_x <= ball.radius {
// Right player scored on the left side.
// We top the score at 999 to avoid text overlap.
score_board.score_right = (score_board.score_right + 1).min(999);
if let Some(text) = text.get_mut(score_text.p2_score) {
text.text = score_board.score_right.to_string();
}
true
} else if ball_x >= ARENA_WIDTH - ball.radius {
// Left player scored on the right side.
// We top the score at 999 to avoid text overlap.
score_board.score_left = (score_board.score_left + 1).min(999);
if let Some(text) = text.get_mut(score_text.p1_score) {
text.text = score_board.score_left.to_string();
}
true
} else {
false
};
if did_hit {
// Reset the ball.
ball.velocity[0] = -ball.velocity[0];
transform.set_translation_x(ARENA_WIDTH / 2.0);
transform.set_translation_y(ARENA_HEIGHT / 2.0);
// Print the score board.
println!(
"Score: | {:^3} | {:^3} |",
score_board.score_left, score_board.score_right
);
// Play audio.
if let Some(ref output) = audio_output {
if let Some(sound) = storage.get(&sounds.score_sfx) {
output.play_once(sound, 1.0);
}
}
}
}
}
}
/// Stores the entities that are displaying the player score with UiText.
pub struct ScoreText {
pub p1_score: Entity,
pub p2_score: Entity,
}
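// Note: this system reads `ScoreText` via `ReadExpect`, so the resource (with
// its two `UiText` entities) must be inserted during state setup before the
// system first runs, or the `ReadExpect` fetch will panic.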
| 32.884211 | 83 | 0.522407 |
9b5da42313493e99a7fb0cee85e9496fa5d86ce7 | 3,017 | // Rust Fujicoin Library
// Written in 2014 by
// Andrew Poelstra <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication
// along with this software.
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
//
//! This module defines types for the hashes used throughout the library. These types are needed in order
//! to avoid mixing data of the same hash format (like SHA256d) but of different meaning
//! (transaction id, block hash etc).
use hashes::{Hash, sha256, sha256d, hash160};
macro_rules! impl_hashencode {
($hashtype:ident) => {
impl $crate::consensus::Encodable for $hashtype {
fn consensus_encode<S: ::std::io::Write>(&self, s: S) -> Result<usize, ::std::io::Error> {
self.0.consensus_encode(s)
}
}
impl $crate::consensus::Decodable for $hashtype {
fn consensus_decode<D: ::std::io::Read>(d: D) -> Result<Self, $crate::consensus::encode::Error> {
use $crate::hashes::Hash;
Ok(Self::from_inner(<<$hashtype as $crate::hashes::Hash>::Inner>::consensus_decode(d)?))
}
}
}
}
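// `hash_newtype!` (from the `hashes` crate) declares a distinct wrapper type
// for each semantic use of a hash, so that e.g. a `Txid` cannot be passed
// where a `BlockHash` is expected. `impl_hashencode!` above then wires a
// wrapper into consensus (de)serialization by delegating to its inner hash.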
hash_newtype!(Txid, sha256d::Hash, 32, doc="A fujicoin transaction hash/transaction ID.");
hash_newtype!(Wtxid, sha256d::Hash, 32, doc="A fujicoin witness transaction ID.");
hash_newtype!(BlockHash, sha256d::Hash, 32, doc="A fujicoin block hash.");
hash_newtype!(SigHash, sha256d::Hash, 32, doc="Hash of the transaction according to the signature algorithm");
hash_newtype!(PubkeyHash, hash160::Hash, 20, doc="A hash of a public key.");
hash_newtype!(ScriptHash, hash160::Hash, 20, doc="A hash of Fujicoin Script bytecode.");
hash_newtype!(WPubkeyHash, hash160::Hash, 20, doc="SegWit version of a public key hash.");
hash_newtype!(WScriptHash, sha256::Hash, 32, doc="SegWit version of a Fujicoin Script bytecode hash.");
hash_newtype!(TxMerkleNode, sha256d::Hash, 32, doc="A hash of the Merkle tree branch or root for transactions");
hash_newtype!(WitnessMerkleNode, sha256d::Hash, 32, doc="A hash corresponding to the Merkle tree root for witness data");
hash_newtype!(WitnessCommitment, sha256d::Hash, 32, doc="A hash corresponding to the witness structure commitment in the coinbase transaction");
hash_newtype!(XpubIdentifier, hash160::Hash, 20, doc="XpubIdentifier as defined in BIP-32.");
hash_newtype!(FilterHash, sha256d::Hash, 32, doc="Filter hash, as defined in BIP-157");
hash_newtype!(FilterHeader, sha256d::Hash, 32, doc="Filter header, as defined in BIP-157");
impl_hashencode!(Txid);
impl_hashencode!(Wtxid);
impl_hashencode!(SigHash);
impl_hashencode!(BlockHash);
impl_hashencode!(TxMerkleNode);
impl_hashencode!(WitnessMerkleNode);
impl_hashencode!(FilterHash);
impl_hashencode!(FilterHeader);
| 47.140625 | 144 | 0.714617 |
010ff8f14c70252e10ad063e573c9664af4fbf11 | 16,791 | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::INTEN {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
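// Illustrative usage sketch (not part of the generated file; `periph` is a
// hypothetical handle to the peripheral that owns this INTEN register):
//
// periph.inten.modify(|_r, w| w.fovfl().set_bit().fsize().set_bit());
// let overflowed = periph.inten.read().fovfl().bit_is_set();
// periph.inten.reset(); // restore the all-zero reset value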
#[doc = r" Value of the field"]
pub struct XCMPWRR {
bits: bool,
}
impl XCMPWRR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct XCMPWFR {
bits: bool,
}
impl XCMPWFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct XCMPRRR {
bits: bool,
}
impl XCMPRRR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct XCMPRFR {
bits: bool,
}
impl XCMPRFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct IOINTWR {
bits: bool,
}
impl IOINTWR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct GENADR {
bits: bool,
}
impl GENADR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct FRDERRR {
bits: bool,
}
impl FRDERRR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct FUNDFLR {
bits: bool,
}
impl FUNDFLR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct FOVFLR {
bits: bool,
}
impl FOVFLR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct FSIZER {
bits: bool,
}
impl FSIZER {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Proxy"]
pub struct _XCMPWRW<'a> {
w: &'a mut W,
}
impl<'a> _XCMPWRW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 9;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _XCMPWFW<'a> {
w: &'a mut W,
}
impl<'a> _XCMPWFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 8;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _XCMPRRW<'a> {
w: &'a mut W,
}
impl<'a> _XCMPRRW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 7;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _XCMPRFW<'a> {
w: &'a mut W,
}
impl<'a> _XCMPRFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 6;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _IOINTWW<'a> {
w: &'a mut W,
}
impl<'a> _IOINTWW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 5;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _GENADW<'a> {
w: &'a mut W,
}
impl<'a> _GENADW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 4;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _FRDERRW<'a> {
w: &'a mut W,
}
impl<'a> _FRDERRW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 3;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _FUNDFLW<'a> {
w: &'a mut W,
}
impl<'a> _FUNDFLW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 2;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _FOVFLW<'a> {
w: &'a mut W,
}
impl<'a> _FOVFLW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 1;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _FSIZEW<'a> {
w: &'a mut W,
}
impl<'a> _FSIZEW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bit 9 - Transfer complete interrupt, write to register space."]
#[inline]
pub fn xcmpwr(&self) -> XCMPWRR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 9;
((self.bits >> OFFSET) & MASK as u32) != 0
};
XCMPWRR { bits }
}
#[doc = "Bit 8 - Transfer complete interrupt, write to FIFO space."]
#[inline]
pub fn xcmpwf(&self) -> XCMPWFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 8;
((self.bits >> OFFSET) & MASK as u32) != 0
};
XCMPWFR { bits }
}
#[doc = "Bit 7 - Transfer complete interrupt, read from register space."]
#[inline]
pub fn xcmprr(&self) -> XCMPRRR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 7;
((self.bits >> OFFSET) & MASK as u32) != 0
};
XCMPRRR { bits }
}
#[doc = "Bit 6 - Transfer complete interrupt, read from FIFO space."]
#[inline]
pub fn xcmprf(&self) -> XCMPRFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 6;
((self.bits >> OFFSET) & MASK as u32) != 0
};
XCMPRFR { bits }
}
#[doc = "Bit 5 - IO Write interrupt."]
#[inline]
pub fn iointw(&self) -> IOINTWR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 5;
((self.bits >> OFFSET) & MASK as u32) != 0
};
IOINTWR { bits }
}
#[doc = "Bit 4 - I2C General Address interrupt."]
#[inline]
pub fn genad(&self) -> GENADR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 4;
((self.bits >> OFFSET) & MASK as u32) != 0
};
GENADR { bits }
}
#[doc = "Bit 3 - FIFO Read Error interrupt."]
#[inline]
pub fn frderr(&self) -> FRDERRR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 3;
((self.bits >> OFFSET) & MASK as u32) != 0
};
FRDERRR { bits }
}
#[doc = "Bit 2 - FIFO Underflow interrupt."]
#[inline]
pub fn fundfl(&self) -> FUNDFLR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 2;
((self.bits >> OFFSET) & MASK as u32) != 0
};
FUNDFLR { bits }
}
#[doc = "Bit 1 - FIFO Overflow interrupt."]
#[inline]
pub fn fovfl(&self) -> FOVFLR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 1;
((self.bits >> OFFSET) & MASK as u32) != 0
};
FOVFLR { bits }
}
#[doc = "Bit 0 - FIFO Size interrupt."]
#[inline]
pub fn fsize(&self) -> FSIZER {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) != 0
};
FSIZER { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bit 9 - Transfer complete interrupt, write to register space."]
#[inline]
pub fn xcmpwr(&mut self) -> _XCMPWRW {
_XCMPWRW { w: self }
}
#[doc = "Bit 8 - Transfer complete interrupt, write to FIFO space."]
#[inline]
pub fn xcmpwf(&mut self) -> _XCMPWFW {
_XCMPWFW { w: self }
}
#[doc = "Bit 7 - Transfer complete interrupt, read from register space."]
#[inline]
pub fn xcmprr(&mut self) -> _XCMPRRW {
_XCMPRRW { w: self }
}
#[doc = "Bit 6 - Transfer complete interrupt, read from FIFO space."]
#[inline]
pub fn xcmprf(&mut self) -> _XCMPRFW {
_XCMPRFW { w: self }
}
#[doc = "Bit 5 - IO Write interrupt."]
#[inline]
pub fn iointw(&mut self) -> _IOINTWW {
_IOINTWW { w: self }
}
#[doc = "Bit 4 - I2C General Address interrupt."]
#[inline]
pub fn genad(&mut self) -> _GENADW {
_GENADW { w: self }
}
#[doc = "Bit 3 - FIFO Read Error interrupt."]
#[inline]
pub fn frderr(&mut self) -> _FRDERRW {
_FRDERRW { w: self }
}
#[doc = "Bit 2 - FIFO Underflow interrupt."]
#[inline]
pub fn fundfl(&mut self) -> _FUNDFLW {
_FUNDFLW { w: self }
}
#[doc = "Bit 1 - FIFO Overflow interrupt."]
#[inline]
pub fn fovfl(&mut self) -> _FOVFLW {
_FOVFLW { w: self }
}
#[doc = "Bit 0 - FIFO Size interrupt."]
#[inline]
pub fn fsize(&mut self) -> _FSIZEW {
_FSIZEW { w: self }
}
}
| 25.635115 | 77 | 0.499077 |
f488f63f5efd603fe0b62e01a99358b7bf3a8239 | 4,879 | #![no_std]
#![allow(non_camel_case_types)]
#[cfg(not(feature = "device-selected"))]
compile_error!(
"This crate requires one of the following device features enabled:
stm32f401
stm32f405
stm32f407
stm32f410
stm32f411
stm32f412
stm32f413
stm32f415
stm32f417
stm32f423
stm32f427
stm32f429
stm32f437
stm32f439
stm32f446
stm32f469
stm32f479"
);
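// For example (illustrative; the dependency version is a placeholder), a
// consumer crate would enable exactly one device feature in its Cargo.toml:
//
//     stm32f4xx-hal = { version = "...", features = ["stm32f407", "rt"] }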
pub use embedded_hal as hal;
pub use nb;
pub use nb::block;
#[cfg(feature = "stm32f401")]
pub use stm32f4::stm32f401 as stm32;
#[cfg(feature = "stm32f405")]
pub use stm32f4::stm32f405 as stm32;
#[cfg(feature = "stm32f407")]
pub use stm32f4::stm32f407 as stm32;
#[cfg(feature = "stm32f410")]
pub use stm32f4::stm32f410 as stm32;
#[cfg(feature = "stm32f411")]
pub use stm32f4::stm32f411 as stm32;
#[cfg(feature = "stm32f412")]
pub use stm32f4::stm32f412 as stm32;
#[cfg(feature = "stm32f413")]
pub use stm32f4::stm32f413 as stm32;
#[cfg(feature = "stm32f415")]
pub use stm32f4::stm32f405 as stm32;
#[cfg(feature = "stm32f417")]
pub use stm32f4::stm32f407 as stm32;
#[cfg(feature = "stm32f423")]
pub use stm32f4::stm32f413 as stm32;
#[cfg(feature = "stm32f427")]
pub use stm32f4::stm32f427 as stm32;
#[cfg(feature = "stm32f429")]
pub use stm32f4::stm32f429 as stm32;
#[cfg(feature = "stm32f437")]
pub use stm32f4::stm32f427 as stm32;
#[cfg(feature = "stm32f439")]
pub use stm32f4::stm32f429 as stm32;
#[cfg(feature = "stm32f446")]
pub use stm32f4::stm32f446 as stm32;
#[cfg(feature = "stm32f469")]
pub use stm32f4::stm32f469 as stm32;
#[cfg(feature = "stm32f479")]
pub use stm32f4::stm32f469 as stm32;
// Enable use of interrupt macro
#[cfg(feature = "rt")]
pub use crate::stm32::interrupt;
#[cfg(feature = "device-selected")]
pub mod adc;
#[cfg(feature = "device-selected")]
pub mod bb;
#[cfg(all(
feature = "can",
any(
feature = "stm32f405",
feature = "stm32f407",
feature = "stm32f412",
feature = "stm32f413",
feature = "stm32f415",
feature = "stm32f417",
feature = "stm32f423",
feature = "stm32f427",
feature = "stm32f429",
feature = "stm32f437",
feature = "stm32f439",
feature = "stm32f446",
feature = "stm32f469",
feature = "stm32f479",
)
))]
pub mod can;
#[cfg(feature = "device-selected")]
pub mod crc32;
#[cfg(all(
feature = "device-selected",
not(any(feature = "stm32f411", feature = "stm32f412", feature = "stm32f401",))
))]
pub mod dac;
#[cfg(feature = "device-selected")]
pub mod delay;
#[cfg(feature = "device-selected")]
pub mod gpio;
#[cfg(feature = "device-selected")]
pub mod i2c;
#[cfg(all(feature = "device-selected", feature = "i2s"))]
pub mod i2s;
#[cfg(all(
feature = "usb_fs",
any(
feature = "stm32f401",
feature = "stm32f405",
feature = "stm32f407",
feature = "stm32f411",
feature = "stm32f412",
feature = "stm32f413",
feature = "stm32f415",
feature = "stm32f417",
feature = "stm32f423",
feature = "stm32f427",
feature = "stm32f429",
feature = "stm32f437",
feature = "stm32f439",
feature = "stm32f446",
feature = "stm32f469",
feature = "stm32f479",
)
))]
pub mod otg_fs;
#[cfg(all(
any(feature = "usb_hs", docsrs),
any(
feature = "stm32f405",
feature = "stm32f407",
feature = "stm32f415",
feature = "stm32f417",
feature = "stm32f427",
feature = "stm32f429",
feature = "stm32f437",
feature = "stm32f439",
feature = "stm32f446",
feature = "stm32f469",
feature = "stm32f479",
)
))]
pub mod otg_hs;
#[cfg(all(
feature = "device-selected",
not(any(
feature = "stm32f401",
feature = "stm32f410",
feature = "stm32f411",
feature = "stm32f446",
))
))]
pub mod rng;
#[cfg(feature = "device-selected")]
pub use stm32 as pac;
#[cfg(feature = "device-selected")]
pub mod dma;
#[cfg(feature = "device-selected")]
pub mod dwt;
#[cfg(feature = "device-selected")]
pub mod prelude;
#[cfg(feature = "device-selected")]
pub mod pwm;
#[cfg(feature = "device-selected")]
pub mod qei;
#[cfg(feature = "device-selected")]
pub mod rcc;
#[cfg(feature = "device-selected")]
pub mod rtc;
#[cfg(all(
feature = "sdio",
not(any(feature = "stm32f410", feature = "stm32f446",))
))]
pub mod sdio;
#[cfg(feature = "device-selected")]
pub mod serial;
#[cfg(feature = "device-selected")]
pub mod signature;
#[cfg(feature = "device-selected")]
pub mod spi;
#[cfg(feature = "device-selected")]
pub mod syscfg;
#[cfg(feature = "device-selected")]
pub mod time;
#[cfg(feature = "device-selected")]
pub mod timer;
#[cfg(feature = "device-selected")]
pub mod watchdog;
| 23.014151 | 82 | 0.614265 |
f42382074fc18e8071619e76d931ac94f3292e39 | 5,078 | //! The systems that power each [`InputManagerPlugin`](crate::InputManagerPlugin).
use crate::{
action_state::{ActionDiff, ActionState, ActionStateDriver},
input_map::InputMap,
user_input::InputStreams,
Actionlike,
};
use bevy::prelude::*;
/// Clears the just-pressed and just-released values of all [`ActionState`]s
///
/// Also resets the internal `pressed_this_tick` field, used to track whether or not to release an action.
pub fn tick_action_state<A: Actionlike>(mut query: Query<&mut ActionState<A>>, time: Res<Time>) {
for mut action_state in query.iter_mut() {
// If `Time` has not ever been advanced, something has gone horribly wrong
// and the user probably forgot to add the `core_plugin`.
action_state.tick(
time.last_update()
.expect("The `Time` resource has never been updated!"),
);
}
}
/// Fetches all of the relevant [`Input`] resources to update [`ActionState`] according to the [`InputMap`]
///
/// Missing resources will be ignored, and treated as if none of the corresponding inputs were pressed
pub fn update_action_state<A: Actionlike>(
maybe_gamepad_input_stream: Option<Res<Input<GamepadButton>>>,
maybe_keyboard_input_stream: Option<Res<Input<KeyCode>>>,
maybe_mouse_input_stream: Option<Res<Input<MouseButton>>>,
mut query: Query<(&mut ActionState<A>, &InputMap<A>)>,
) {
let gamepad = maybe_gamepad_input_stream.as_deref();
let keyboard = maybe_keyboard_input_stream.as_deref();
let mouse = maybe_mouse_input_stream.as_deref();
for (mut action_state, input_map) in query.iter_mut() {
let input_streams = InputStreams {
gamepad,
keyboard,
mouse,
associated_gamepad: input_map.gamepad(),
};
let pressed_set = input_map.which_pressed(&input_streams);
action_state.update(pressed_set);
}
}
/// When a button with an [`ActionStateDriver<A>`] component is clicked, press the corresponding virtual button in the targeted [`ActionState`]
///
/// The action triggered is determined by the `A` variant stored in the driver attached to your UI-defined button.
pub fn update_action_state_from_interaction<A: Actionlike>(
ui_query: Query<(&Interaction, &ActionStateDriver<A>)>,
mut action_state_query: Query<&mut ActionState<A>>,
) {
for (&interaction, action_state_driver) in ui_query.iter() {
if interaction == Interaction::Clicked {
let mut action_state = action_state_query
.get_mut(action_state_driver.entity)
.expect("Entity does not exist, or does not have an `ActionState` component.");
action_state.press(&action_state_driver.action);
}
}
}
/// Generates an [`Events`](bevy::ecs::event::Events) stream of [`ActionDiff`] from [`ActionState`]
///
/// The `ID` generic type should be a stable entity identifier,
/// suitable to be sent across a network.
///
/// This system is not part of the [`InputManagerPlugin`](crate::plugin::InputManagerPlugin) and must be added manually.
pub fn generate_action_diffs<A: Actionlike, ID: Eq + Clone + Component>(
action_state_query: Query<(&ActionState<A>, &ID)>,
mut action_diffs: EventWriter<ActionDiff<A, ID>>,
) {
for (action_state, id) in action_state_query.iter() {
for action in action_state.get_just_pressed() {
action_diffs.send(ActionDiff::Pressed {
action: action.clone(),
id: id.clone(),
});
}
for action in action_state.get_just_released() {
action_diffs.send(ActionDiff::Released {
action: action.clone(),
id: id.clone(),
});
}
}
}
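// Illustrative wiring sketch (not part of the original file; the exact Bevy
// `App` builder method names depend on the Bevy version and are assumptions):
//
// app.add_event::<ActionDiff<Action, PlayerId>>()
//     .add_system(generate_action_diffs::<Action, PlayerId>)  // sending side
//     .add_system(process_action_diffs::<Action, PlayerId>);  // receiving side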
/// Generates an [`Events`](bevy::ecs::event::Events) stream of [`ActionDiff`] from [`ActionState`]
///
/// The `ID` generic type should be a stable entity identifier,
/// suitable to be sent across a network.
///
/// This system is not part of the [`InputManagerPlugin`](crate::plugin::InputManagerPlugin) and must be added manually.
pub fn process_action_diffs<A: Actionlike, ID: Eq + Component + Clone>(
mut action_state_query: Query<(&mut ActionState<A>, &ID)>,
mut action_diffs: EventReader<ActionDiff<A, ID>>,
) {
// PERF: This would probably be faster with an index, but is much more fussy
for action_diff in action_diffs.iter() {
for (mut action_state, id) in action_state_query.iter_mut() {
match action_diff {
ActionDiff::Pressed {
action,
id: event_id,
} => {
if event_id == id {
action_state.press(action);
continue;
}
}
ActionDiff::Released {
action,
id: event_id,
} => {
if event_id == id {
action_state.release(action);
continue;
}
}
};
}
}
}
| 37.895522 | 120 | 0.618748 |
9baf1ee9db6ba3bdfbe51f07ef32cc87725ea72f | 11,375 | pub mod mock;
use crate::mock::{MockComponentBuilder, MockFile, MockInstallerBuilder};
use rustup::dist::component::Components;
use rustup::dist::component::Transaction;
use rustup::dist::component::{DirectoryPackage, Package};
use rustup::dist::dist::DEFAULT_DIST_SERVER;
use rustup::dist::prefix::InstallPrefix;
use rustup::dist::temp;
use rustup::dist::Notification;
use rustup::utils::utils;
use rustup::ErrorKind;
use std::fs::File;
use std::io::Write;
// Just testing that the mocks work
#[test]
fn mock_smoke_test() {
let tempdir = tempfile::Builder::new().prefix("rustup").tempdir().unwrap();
let mock = MockInstallerBuilder {
components: vec![
MockComponentBuilder {
name: "mycomponent".to_string(),
files: vec![
MockFile::new("bin/foo", b"foo"),
MockFile::new("lib/bar", b"bar"),
MockFile::new_dir("doc/stuff", &[("doc1", b"", false), ("doc2", b"", false)]),
],
},
MockComponentBuilder {
name: "mycomponent2".to_string(),
files: vec![MockFile::new("bin/quux", b"quux")],
},
],
};
mock.build(tempdir.path());
assert!(tempdir.path().join("components").exists());
assert!(tempdir.path().join("mycomponent/manifest.in").exists());
assert!(tempdir.path().join("mycomponent/bin/foo").exists());
assert!(tempdir.path().join("mycomponent/lib/bar").exists());
assert!(tempdir.path().join("mycomponent/doc/stuff/doc1").exists());
assert!(tempdir.path().join("mycomponent/doc/stuff/doc2").exists());
assert!(tempdir.path().join("mycomponent2/manifest.in").exists());
assert!(tempdir.path().join("mycomponent2/bin/quux").exists());
}
#[test]
fn package_contains() {
let tempdir = tempfile::Builder::new().prefix("rustup").tempdir().unwrap();
let mock = MockInstallerBuilder {
components: vec![
MockComponentBuilder {
name: "mycomponent".to_string(),
files: vec![MockFile::new("bin/foo", b"foo")],
},
MockComponentBuilder {
name: "mycomponent2".to_string(),
files: vec![MockFile::new("bin/bar", b"bar")],
},
],
};
mock.build(tempdir.path());
let package = DirectoryPackage::new(tempdir.path().to_owned(), true).unwrap();
assert!(package.contains("mycomponent", None));
assert!(package.contains("mycomponent2", None));
}
#[test]
fn package_bad_version() {
let tempdir = tempfile::Builder::new().prefix("rustup").tempdir().unwrap();
let mock = MockInstallerBuilder {
components: vec![MockComponentBuilder {
name: "mycomponent".to_string(),
files: vec![MockFile::new("bin/foo", b"foo")],
}],
};
mock.build(tempdir.path());
let mut ver = File::create(tempdir.path().join("rust-installer-version")).unwrap();
writeln!(ver, "100").unwrap();
assert!(DirectoryPackage::new(tempdir.path().to_owned(), true).is_err());
}
#[test]
fn basic_install() {
let pkgdir = tempfile::Builder::new().prefix("rustup").tempdir().unwrap();
let mock = MockInstallerBuilder {
components: vec![MockComponentBuilder {
name: "mycomponent".to_string(),
files: vec![
MockFile::new("bin/foo", b"foo"),
MockFile::new("lib/bar", b"bar"),
MockFile::new_dir("doc/stuff", &[("doc1", b"", false), ("doc2", b"", false)]),
],
}],
};
mock.build(pkgdir.path());
let instdir = tempfile::Builder::new().prefix("rustup").tempdir().unwrap();
let prefix = InstallPrefix::from(instdir.path().to_owned());
let tmpdir = tempfile::Builder::new().prefix("rustup").tempdir().unwrap();
let tmpcfg = temp::Cfg::new(
tmpdir.path().to_owned(),
DEFAULT_DIST_SERVER,
Box::new(|_| ()),
);
let notify = |_: Notification<'_>| ();
let tx = Transaction::new(prefix.clone(), &tmpcfg, ¬ify);
let components = Components::open(prefix).unwrap();
let pkg = DirectoryPackage::new(pkgdir.path().to_owned(), true).unwrap();
let tx = pkg.install(&components, "mycomponent", None, tx).unwrap();
tx.commit();
assert!(utils::path_exists(instdir.path().join("bin/foo")));
assert!(utils::path_exists(instdir.path().join("lib/bar")));
assert!(utils::path_exists(instdir.path().join("doc/stuff/doc1")));
assert!(utils::path_exists(instdir.path().join("doc/stuff/doc2")));
assert!(components.find("mycomponent").unwrap().is_some());
}
#[test]
fn multiple_component_install() {
let pkgdir = tempfile::Builder::new().prefix("rustup").tempdir().unwrap();
let mock = MockInstallerBuilder {
components: vec![
MockComponentBuilder {
name: "mycomponent".to_string(),
files: vec![MockFile::new("bin/foo", b"foo")],
},
MockComponentBuilder {
name: "mycomponent2".to_string(),
files: vec![MockFile::new("lib/bar", b"bar")],
},
],
};
mock.build(pkgdir.path());
let instdir = tempfile::Builder::new().prefix("rustup").tempdir().unwrap();
let prefix = InstallPrefix::from(instdir.path().to_owned());
let tmpdir = tempfile::Builder::new().prefix("rustup").tempdir().unwrap();
let tmpcfg = temp::Cfg::new(
tmpdir.path().to_owned(),
DEFAULT_DIST_SERVER,
Box::new(|_| ()),
);
let notify = |_: Notification<'_>| ();
let tx = Transaction::new(prefix.clone(), &tmpcfg, ¬ify);
let components = Components::open(prefix).unwrap();
let pkg = DirectoryPackage::new(pkgdir.path().to_owned(), true).unwrap();
let tx = pkg.install(&components, "mycomponent", None, tx).unwrap();
let tx = pkg.install(&components, "mycomponent2", None, tx).unwrap();
tx.commit();
assert!(utils::path_exists(instdir.path().join("bin/foo")));
assert!(utils::path_exists(instdir.path().join("lib/bar")));
assert!(components.find("mycomponent").unwrap().is_some());
assert!(components.find("mycomponent2").unwrap().is_some());
}
#[test]
fn uninstall() {
let pkgdir = tempfile::Builder::new().prefix("rustup").tempdir().unwrap();
let mock = MockInstallerBuilder {
components: vec![
MockComponentBuilder {
name: "mycomponent".to_string(),
files: vec![
MockFile::new("bin/foo", b"foo"),
MockFile::new("lib/bar", b"bar"),
MockFile::new_dir("doc/stuff", &[("doc1", b"", false), ("doc2", b"", false)]),
],
},
MockComponentBuilder {
name: "mycomponent2".to_string(),
files: vec![MockFile::new("lib/quux", b"quux")],
},
],
};
mock.build(pkgdir.path());
let instdir = tempfile::Builder::new().prefix("rustup").tempdir().unwrap();
let prefix = InstallPrefix::from(instdir.path().to_owned());
let tmpdir = tempfile::Builder::new().prefix("rustup").tempdir().unwrap();
let tmpcfg = temp::Cfg::new(
tmpdir.path().to_owned(),
DEFAULT_DIST_SERVER,
Box::new(|_| ()),
);
let notify = |_: Notification<'_>| ();
let tx = Transaction::new(prefix.clone(), &tmpcfg, ¬ify);
let components = Components::open(prefix.clone()).unwrap();
let pkg = DirectoryPackage::new(pkgdir.path().to_owned(), true).unwrap();
let tx = pkg.install(&components, "mycomponent", None, tx).unwrap();
let tx = pkg.install(&components, "mycomponent2", None, tx).unwrap();
tx.commit();
// Now uninstall
let notify = |_: Notification<'_>| ();
let mut tx = Transaction::new(prefix, &tmpcfg, ¬ify);
for component in components.list().unwrap() {
tx = component.uninstall(tx).unwrap();
}
tx.commit();
assert!(!utils::path_exists(instdir.path().join("bin/foo")));
assert!(!utils::path_exists(instdir.path().join("lib/bar")));
assert!(!utils::path_exists(instdir.path().join("doc/stuff/doc1")));
assert!(!utils::path_exists(instdir.path().join("doc/stuff/doc2")));
assert!(!utils::path_exists(instdir.path().join("doc/stuff")));
assert!(components.find("mycomponent").unwrap().is_none());
assert!(components.find("mycomponent2").unwrap().is_none());
}
// If any single file can't be uninstalled, it is not a fatal error
// and the subsequent files will still be removed.
#[test]
fn uninstall_best_effort() {
//unimplemented!()
}
#[test]
fn component_bad_version() {
let pkgdir = tempfile::Builder::new().prefix("rustup").tempdir().unwrap();
let mock = MockInstallerBuilder {
components: vec![MockComponentBuilder {
name: "mycomponent".to_string(),
files: vec![MockFile::new("bin/foo", b"foo")],
}],
};
mock.build(pkgdir.path());
let instdir = tempfile::Builder::new().prefix("rustup").tempdir().unwrap();
let prefix = InstallPrefix::from(instdir.path().to_owned());
let tmpdir = tempfile::Builder::new().prefix("rustup").tempdir().unwrap();
let tmpcfg = temp::Cfg::new(
tmpdir.path().to_owned(),
DEFAULT_DIST_SERVER,
Box::new(|_| ()),
);
let notify = |_: Notification<'_>| ();
let tx = Transaction::new(prefix.clone(), &tmpcfg, ¬ify);
let components = Components::open(prefix.clone()).unwrap();
let pkg = DirectoryPackage::new(pkgdir.path().to_owned(), true).unwrap();
let tx = pkg.install(&components, "mycomponent", None, tx).unwrap();
tx.commit();
// Write a bogus version to the component manifest directory
utils::write_file("", &prefix.manifest_file("rust-installer-version"), "100\n").unwrap();
// Can't open components now
let e = Components::open(prefix).unwrap_err();
if let ErrorKind::BadInstalledMetadataVersion(_) = *e.kind() {
} else {
panic!()
}
}
// Installing to a prefix that doesn't exist creates it automatically
#[test]
fn install_to_prefix_that_does_not_exist() {
let pkgdir = tempfile::Builder::new().prefix("rustup").tempdir().unwrap();
let mock = MockInstallerBuilder {
components: vec![MockComponentBuilder {
name: "mycomponent".to_string(),
files: vec![MockFile::new("bin/foo", b"foo")],
}],
};
mock.build(pkgdir.path());
let instdir = tempfile::Builder::new().prefix("rustup").tempdir().unwrap();
// The directory that does not exist
let does_not_exist = instdir.path().join("super_not_real");
let prefix = InstallPrefix::from(does_not_exist.clone());
let tmpdir = tempfile::Builder::new().prefix("rustup").tempdir().unwrap();
let tmpcfg = temp::Cfg::new(
tmpdir.path().to_owned(),
DEFAULT_DIST_SERVER,
Box::new(|_| ()),
);
let notify = |_: Notification<'_>| ();
let tx = Transaction::new(prefix.clone(), &tmpcfg, ¬ify);
let components = Components::open(prefix).unwrap();
let pkg = DirectoryPackage::new(pkgdir.path().to_owned(), true).unwrap();
let tx = pkg.install(&components, "mycomponent", None, tx).unwrap();
tx.commit();
assert!(utils::path_exists(does_not_exist.join("bin/foo")));
}
| 34.469697 | 98 | 0.599385 |
0e513c6d09033cf90767c414b5d4283d827da8dc | 4,278 | //! Native rust database driver for SAP HANA (TM).
//!
//! `hdbconnect` provides a lean, fast, and easy-to-use rust-API for working with
//! SAP HANA. The driver is written completely in rust.
//! It interoperates elegantly with all data types that implement the standard
//! `serde::Serialize` and/or `serde::Deserialize` traits, for input and output respectively.
//! So, instead of iterating over a resultset by rows and columns, you can
//! assign the complete resultset directly to any rust structure that fits the data
//! semantics.
//!
//! `hdbconnect` implements this with the help of [`serde_db`](https://docs.rs/serde_db),
//! a reusable library for simplifying the data exchange between application code
//! and database drivers, both for input parameters (e.g. to prepared statements)
//! and for results that are returned from the database.
//!
//! In contrast to typical ORM mapping variants, this approach allows
//! using the full flexibility of SQL (projection lists, all kinds of joins,
//! unions, nested queries, etc). Whatever query you need, you just use it, without further ado
//! for defining object models etc., and whatever result structure you want to read,
//! you just use a corresponding rust structure into
//! which you deserialize the data. It's hard to use less code!
//!
//! See [code examples](crate::code_examples) for an overview.
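//!
//! For instance, a query result can be deserialized into a `Vec` of structs in
//! one call. A minimal sketch (the connection URL and table are made up):
//!
//! ```rust,no_run
//! use hdbconnect::{Connection, HdbResult};
//! use serde::Deserialize;
//!
//! #[derive(Deserialize)]
//! struct Customer {
//!     id: i32,
//!     name: String,
//! }
//!
//! fn customers() -> HdbResult<Vec<Customer>> {
//!     let mut connection = Connection::new("hdbsql://my_user:my_passwd@the_host:30815")?;
//!     connection.query("select ID, NAME from CUSTOMERS")?.try_into()
//! }
//! ```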
//!
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
#![deny(clippy::all)]
#![deny(clippy::pedantic)]
#![allow(clippy::module_name_repetitions)]
#![allow(clippy::non_ascii_literal)]
#![allow(clippy::must_use_candidate)]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde;
mod conn;
mod hdb_error;
mod hdb_response;
mod hdb_return_value;
mod protocol;
mod serde_db_impl;
mod sync_connection;
mod sync_connection_manager;
mod sync_prepared_statement;
mod types_impl;
mod xa_impl;
pub mod code_examples;
pub use crate::conn::{
url, ConnectParams, ConnectParamsBuilder, IntoConnectParams, IntoConnectParamsBuilder,
ServerCerts,
};
pub use crate::hdb_error::{HdbError, HdbResult};
pub use crate::hdb_response::HdbResponse;
pub use crate::hdb_return_value::HdbReturnValue;
pub use crate::protocol::parts::{
ExecutionResult, FieldMetadata, HdbValue, OutputParameters, ParameterBinding,
ParameterDescriptor, ParameterDescriptors, ParameterDirection, ResultSet, Row, ServerError,
Severity, TypeId,
};
pub use crate::protocol::ServerUsage;
pub use crate::serde_db_impl::{time, ToHana};
pub use crate::sync_connection::Connection;
pub use crate::sync_connection_manager::ConnectionManager;
pub use crate::sync_prepared_statement::PreparedStatement;
pub use r2d2;
/// Non-standard types that are used within the
/// [`HdbValue`](crate::HdbValue)s in a [`ResultSet`](crate::ResultSet).
///
/// A `ResultSet` contains a sequence of Rows, each row is a sequence of
/// `HdbValue`s. Some of the `HdbValue`s are implemented using `LongDate`,
/// BLOB, etc.
pub mod types {
pub use crate::types_impl::lob::{BLob, CLob, CharLobSlice, NCLob};
pub use crate::types_impl::daydate::DayDate;
pub use crate::types_impl::longdate::LongDate;
pub use crate::types_impl::seconddate::SecondDate;
pub use crate::types_impl::secondtime::SecondTime;
}
/// Default value for the number of resultset lines that are fetched
/// with a single FETCH roundtrip; the constant's value is 100,000.
///
/// The value used at runtime can be changed with
/// [`Connection::set_fetch_size()`](crate::Connection::set_fetch_size).
pub const DEFAULT_FETCH_SIZE: u32 = 100_000;
/// Number of bytes (for BLOBS and CLOBS) or 1-2-3-byte sequences (for NCLOBS)
/// that are fetched in a single LOB READ roundtrip; the constant's value is 16,000,000.
///
/// The value used at runtime can be changed with
/// [`Connection::set_lob_read_length()`](crate::Connection::set_lob_read_length).
pub const DEFAULT_LOB_READ_LENGTH: u32 = 16_000_000;
/// Number of bytes that are written in a single LOB WRITE roundtrip;
/// the constant's value is 16,000,000.
///
/// The value used at runtime can be changed with
/// [`Connection::set_lob_write_length()`](crate::Connection::set_lob_write_length).
pub const DEFAULT_LOB_WRITE_LENGTH: usize = 16_000_000;
| 38.890909 | 95 | 0.751052 |
c1a97cd94dc3945690ceb9fde728c0252b8db075 | 38,594 | //! A pass that annotates every item and method with its stability level,
//! propagating default levels lexically from parent to children ast nodes.
pub use self::StabilityLevel::*;
use crate::lint::{self, Lint, in_derive_expansion};
use crate::hir::{self, Item, Generics, StructField, Variant, HirId};
use crate::hir::def::Def;
use crate::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId, LOCAL_CRATE};
use crate::hir::intravisit::{self, Visitor, NestedVisitorMap};
use crate::ty::query::Providers;
use crate::middle::privacy::AccessLevels;
use crate::session::{DiagnosticMessageId, Session};
use syntax::symbol::Symbol;
use syntax_pos::{Span, MultiSpan};
use syntax::ast::Attribute;
use syntax::errors::Applicability;
use syntax::feature_gate::{GateIssue, emit_feature_err};
use syntax::attr::{self, Stability, Deprecation};
use crate::ty::{self, TyCtxt};
use crate::util::nodemap::{FxHashSet, FxHashMap};
use std::mem::replace;
use std::cmp::Ordering;
#[derive(RustcEncodable, RustcDecodable, PartialEq, PartialOrd, Clone, Copy, Debug, Eq, Hash)]
pub enum StabilityLevel {
Unstable,
Stable,
}
impl StabilityLevel {
pub fn from_attr_level(level: &attr::StabilityLevel) -> Self {
if level.is_stable() { Stable } else { Unstable }
}
}
#[derive(PartialEq)]
enum AnnotationKind {
// Annotation is required if not inherited from unstable parents
Required,
// Annotation is useless, reject it
Prohibited,
// Annotation itself is useless, but it can be propagated to children
Container,
}
/// An entry in the `depr_map`.
#[derive(Clone)]
pub struct DeprecationEntry {
/// The metadata of the attribute associated with this entry.
pub attr: Deprecation,
/// The `DefId` where the attr was originally attached. `None` for non-local
/// `DefId`'s.
origin: Option<HirId>,
}
impl_stable_hash_for!(struct self::DeprecationEntry {
attr,
origin
});
impl DeprecationEntry {
fn local(attr: Deprecation, id: HirId) -> DeprecationEntry {
DeprecationEntry {
attr,
origin: Some(id),
}
}
pub fn external(attr: Deprecation) -> DeprecationEntry {
DeprecationEntry {
attr,
origin: None,
}
}
pub fn same_origin(&self, other: &DeprecationEntry) -> bool {
match (self.origin, other.origin) {
(Some(o1), Some(o2)) => o1 == o2,
_ => false
}
}
}
/// A stability index, giving the stability level for items and methods.
pub struct Index<'tcx> {
/// This is mostly a cache, except the stabilities of local items
/// are filled by the annotator.
stab_map: FxHashMap<HirId, &'tcx Stability>,
depr_map: FxHashMap<HirId, DeprecationEntry>,
/// Maps for each crate whether it is part of the staged API.
staged_api: FxHashMap<CrateNum, bool>,
/// Features enabled for this crate.
active_features: FxHashSet<Symbol>,
}
impl_stable_hash_for!(struct self::Index<'tcx> {
stab_map,
depr_map,
staged_api,
active_features
});
// A private tree-walker for producing an Index.
struct Annotator<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
index: &'a mut Index<'tcx>,
parent_stab: Option<&'tcx Stability>,
parent_depr: Option<DeprecationEntry>,
in_trait_impl: bool,
}
impl<'a, 'tcx: 'a> Annotator<'a, 'tcx> {
// Determine the stability for a node based on its attributes and inherited
// stability. The stability is recorded in the index and used as the parent.
fn annotate<F>(&mut self, hir_id: HirId, attrs: &[Attribute],
item_sp: Span, kind: AnnotationKind, visit_children: F)
where F: FnOnce(&mut Self)
{
if self.tcx.features().staged_api {
// This crate explicitly wants staged API.
debug!("annotate(id = {:?}, attrs = {:?})", hir_id, attrs);
if let Some(..) = attr::find_deprecation(&self.tcx.sess.parse_sess, attrs, item_sp) {
self.tcx.sess.span_err(item_sp, "`#[deprecated]` cannot be used in staged api, \
use `#[rustc_deprecated]` instead");
}
if let Some(mut stab) = attr::find_stability(&self.tcx.sess.parse_sess,
attrs, item_sp) {
// Error if prohibited, or can't inherit anything from a container
if kind == AnnotationKind::Prohibited ||
(kind == AnnotationKind::Container &&
stab.level.is_stable() &&
stab.rustc_depr.is_none()) {
self.tcx.sess.span_err(item_sp, "This stability annotation is useless");
}
debug!("annotate: found {:?}", stab);
// If parent is deprecated and we're not, inherit this by merging
// deprecated_since and its reason.
if let Some(parent_stab) = self.parent_stab {
if parent_stab.rustc_depr.is_some() && stab.rustc_depr.is_none() {
stab.rustc_depr = parent_stab.rustc_depr.clone()
}
}
let stab = self.tcx.intern_stability(stab);
// Check if deprecated_since < stable_since. If it is,
// this is *almost surely* an accident.
if let (&Some(attr::RustcDeprecation {since: dep_since, ..}),
&attr::Stable {since: stab_since}) = (&stab.rustc_depr, &stab.level) {
// Explicit version of iter::order::lt to handle parse errors properly
for (dep_v, stab_v) in dep_since.as_str()
.split('.')
.zip(stab_since.as_str().split('.'))
{
if let (Ok(dep_v), Ok(stab_v)) = (dep_v.parse::<u64>(), stab_v.parse()) {
match dep_v.cmp(&stab_v) {
Ordering::Less => {
self.tcx.sess.span_err(item_sp, "An API can't be stabilized \
after it is deprecated");
break
}
Ordering::Equal => continue,
Ordering::Greater => break,
}
} else {
// Act like it isn't less because the question is now nonsensical,
// and this makes us not do anything else interesting.
self.tcx.sess.span_err(item_sp, "Invalid stability or deprecation \
version found");
break
}
}
}
self.index.stab_map.insert(hir_id, stab);
let orig_parent_stab = replace(&mut self.parent_stab, Some(stab));
visit_children(self);
self.parent_stab = orig_parent_stab;
} else {
debug!("annotate: not found, parent = {:?}", self.parent_stab);
if let Some(stab) = self.parent_stab {
if stab.level.is_unstable() {
self.index.stab_map.insert(hir_id, stab);
}
}
visit_children(self);
}
} else {
// Emit errors for non-staged-api crates.
for attr in attrs {
if let Some(tag) = attr.ident_str() {
if tag == "unstable" || tag == "stable" || tag == "rustc_deprecated" {
attr::mark_used(attr);
self.tcx.sess.span_err(attr.span, "stability attributes may not be used \
outside of the standard library");
}
}
}
// Propagate unstability. This can happen even for non-staged-api crates in case
// -Zforce-unstable-if-unmarked is set.
if let Some(stab) = self.parent_stab {
if stab.level.is_unstable() {
self.index.stab_map.insert(hir_id, stab);
}
}
if let Some(depr) = attr::find_deprecation(&self.tcx.sess.parse_sess, attrs, item_sp) {
if kind == AnnotationKind::Prohibited {
self.tcx.sess.span_err(item_sp, "This deprecation annotation is useless");
}
// `Deprecation` is just two pointers, no need to intern it
let depr_entry = DeprecationEntry::local(depr, hir_id);
self.index.depr_map.insert(hir_id, depr_entry.clone());
let orig_parent_depr = replace(&mut self.parent_depr,
Some(depr_entry));
visit_children(self);
self.parent_depr = orig_parent_depr;
} else if let Some(parent_depr) = self.parent_depr.clone() {
self.index.depr_map.insert(hir_id, parent_depr);
visit_children(self);
} else {
visit_children(self);
}
}
}
}
impl<'a, 'tcx> Visitor<'tcx> for Annotator<'a, 'tcx> {
/// Because stability levels are scoped lexically, we want to walk
/// nested items in the context of the outer item, so enable
/// deep-walking.
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::All(&self.tcx.hir())
}
fn visit_item(&mut self, i: &'tcx Item) {
let orig_in_trait_impl = self.in_trait_impl;
let mut kind = AnnotationKind::Required;
match i.node {
// Inherent impls and foreign modules serve only as containers for other items,
// they don't have their own stability. They still can be annotated as unstable
// and propagate this unstability to children, but this annotation is completely
// optional. They inherit stability from their parents when unannotated.
hir::ItemKind::Impl(.., None, _, _) | hir::ItemKind::ForeignMod(..) => {
self.in_trait_impl = false;
kind = AnnotationKind::Container;
}
hir::ItemKind::Impl(.., Some(_), _, _) => {
self.in_trait_impl = true;
}
hir::ItemKind::Struct(ref sd, _) => {
if !sd.is_struct() {
self.annotate(sd.hir_id(), &i.attrs, i.span, AnnotationKind::Required, |_| {})
}
}
_ => {}
}
self.annotate(i.hir_id, &i.attrs, i.span, kind, |v| {
intravisit::walk_item(v, i)
});
self.in_trait_impl = orig_in_trait_impl;
}
fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem) {
self.annotate(ti.hir_id, &ti.attrs, ti.span, AnnotationKind::Required, |v| {
intravisit::walk_trait_item(v, ti);
});
}
fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem) {
let kind = if self.in_trait_impl {
AnnotationKind::Prohibited
} else {
AnnotationKind::Required
};
self.annotate(ii.hir_id, &ii.attrs, ii.span, kind, |v| {
intravisit::walk_impl_item(v, ii);
});
}
fn visit_variant(&mut self, var: &'tcx Variant, g: &'tcx Generics, item_id: HirId) {
self.annotate(var.node.data.hir_id(), &var.node.attrs, var.span, AnnotationKind::Required,
|v| { intravisit::walk_variant(v, var, g, item_id) })
}
fn visit_struct_field(&mut self, s: &'tcx StructField) {
self.annotate(s.hir_id, &s.attrs, s.span, AnnotationKind::Required, |v| {
intravisit::walk_struct_field(v, s);
});
}
fn visit_foreign_item(&mut self, i: &'tcx hir::ForeignItem) {
self.annotate(i.hir_id, &i.attrs, i.span, AnnotationKind::Required, |v| {
intravisit::walk_foreign_item(v, i);
});
}
fn visit_macro_def(&mut self, md: &'tcx hir::MacroDef) {
self.annotate(md.hir_id, &md.attrs, md.span, AnnotationKind::Required, |_| {});
}
}
struct MissingStabilityAnnotations<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
access_levels: &'a AccessLevels,
}
impl<'a, 'tcx: 'a> MissingStabilityAnnotations<'a, 'tcx> {
fn check_missing_stability(&self, hir_id: HirId, span: Span, name: &str) {
let stab = self.tcx.stability().local_stability(hir_id);
let is_error = !self.tcx.sess.opts.test &&
stab.is_none() &&
self.access_levels.is_reachable(self.tcx.hir().hir_to_node_id(hir_id));
if is_error {
self.tcx.sess.span_err(
span,
&format!("{} has missing stability attribute", name),
);
}
}
}
impl<'a, 'tcx> Visitor<'tcx> for MissingStabilityAnnotations<'a, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::OnlyBodies(&self.tcx.hir())
}
fn visit_item(&mut self, i: &'tcx Item) {
match i.node {
// Inherent impls and foreign modules serve only as containers for other items,
// they don't have their own stability. They still can be annotated as unstable
// and propagate this unstability to children, but this annotation is completely
// optional. They inherit stability from their parents when unannotated.
hir::ItemKind::Impl(.., None, _, _) | hir::ItemKind::ForeignMod(..) => {}
_ => self.check_missing_stability(i.hir_id, i.span, i.node.descriptive_variant())
}
intravisit::walk_item(self, i)
}
fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem) {
self.check_missing_stability(ti.hir_id, ti.span, "item");
intravisit::walk_trait_item(self, ti);
}
fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem) {
let impl_def_id = self.tcx.hir().local_def_id_from_hir_id(
self.tcx.hir().get_parent_item(ii.hir_id));
if self.tcx.impl_trait_ref(impl_def_id).is_none() {
self.check_missing_stability(ii.hir_id, ii.span, "item");
}
intravisit::walk_impl_item(self, ii);
}
fn visit_variant(&mut self, var: &'tcx Variant, g: &'tcx Generics, item_id: HirId) {
self.check_missing_stability(var.node.data.hir_id(), var.span, "variant");
intravisit::walk_variant(self, var, g, item_id);
}
fn visit_struct_field(&mut self, s: &'tcx StructField) {
self.check_missing_stability(s.hir_id, s.span, "field");
intravisit::walk_struct_field(self, s);
}
fn visit_foreign_item(&mut self, i: &'tcx hir::ForeignItem) {
self.check_missing_stability(i.hir_id, i.span, i.node.descriptive_variant());
intravisit::walk_foreign_item(self, i);
}
fn visit_macro_def(&mut self, md: &'tcx hir::MacroDef) {
self.check_missing_stability(md.hir_id, md.span, "macro");
}
}
impl<'a, 'tcx> Index<'tcx> {
pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Index<'tcx> {
let is_staged_api =
tcx.sess.opts.debugging_opts.force_unstable_if_unmarked ||
tcx.features().staged_api;
let mut staged_api = FxHashMap::default();
staged_api.insert(LOCAL_CRATE, is_staged_api);
let mut index = Index {
staged_api,
stab_map: Default::default(),
depr_map: Default::default(),
active_features: Default::default(),
};
let active_lib_features = &tcx.features().declared_lib_features;
let active_lang_features = &tcx.features().declared_lang_features;
// Put the active features into a map for quick lookup.
index.active_features =
active_lib_features.iter().map(|&(ref s, ..)| s.clone())
.chain(active_lang_features.iter().map(|&(ref s, ..)| s.clone()))
.collect();
{
let krate = tcx.hir().krate();
let mut annotator = Annotator {
tcx,
index: &mut index,
parent_stab: None,
parent_depr: None,
in_trait_impl: false,
};
// If the `-Z force-unstable-if-unmarked` flag is passed then we provide
// a parent stability annotation which indicates that this is private
// with the `rustc_private` feature. This is intended for use when
// compiling librustc crates themselves so we can leverage crates.io
// while maintaining the invariant that all sysroot crates are unstable
// by default and are unable to be used.
if tcx.sess.opts.debugging_opts.force_unstable_if_unmarked {
let reason = "this crate is being loaded from the sysroot, an \
unstable location; did you mean to load this crate \
from crates.io via `Cargo.toml` instead?";
let stability = tcx.intern_stability(Stability {
level: attr::StabilityLevel::Unstable {
reason: Some(Symbol::intern(reason)),
issue: 27812,
},
feature: Symbol::intern("rustc_private"),
rustc_depr: None,
const_stability: None,
promotable: false,
});
annotator.parent_stab = Some(stability);
}
annotator.annotate(hir::CRATE_HIR_ID,
&krate.attrs,
krate.span,
AnnotationKind::Required,
|v| intravisit::walk_crate(v, krate));
}
return index
}
pub fn local_stability(&self, id: HirId) -> Option<&'tcx Stability> {
self.stab_map.get(&id).cloned()
}
pub fn local_deprecation_entry(&self, id: HirId) -> Option<DeprecationEntry> {
self.depr_map.get(&id).cloned()
}
}
/// Cross-references the feature names of unstable APIs with enabled
/// features and possibly prints errors.
fn check_mod_unstable_api_usage<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>, module_def_id: DefId) {
tcx.hir().visit_item_likes_in_module(module_def_id, &mut Checker { tcx }.as_deep_visitor());
}
pub fn provide(providers: &mut Providers<'_>) {
*providers = Providers {
check_mod_unstable_api_usage,
..*providers
};
}
/// Checks whether an item marked with `deprecated(since="X")` is currently
/// deprecated (i.e., whether X is not greater than the current rustc version).
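/// For example (a sketch): with a compiler built as `CFG_RELEASE = "1.40.0"`,
/// `deprecation_in_effect("1.39.0")` and `deprecation_in_effect("1.40.0")` are
/// `true`, while `deprecation_in_effect("1.41.0")` is `false`; a `since` value
/// that doesn't parse into three components is treated as already in effect.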
pub fn deprecation_in_effect(since: &str) -> bool {
fn parse_version(ver: &str) -> Vec<u32> {
// We ignore non-integer components of the version (e.g., "nightly").
ver.split(|c| c == '.' || c == '-').flat_map(|s| s.parse()).collect()
}
if let Some(rustc) = option_env!("CFG_RELEASE") {
let since: Vec<u32> = parse_version(since);
let rustc: Vec<u32> = parse_version(rustc);
// We simply treat invalid `since` attributes as relating to a previous
// Rust version, thus always displaying the warning.
if since.len() != 3 {
return true;
}
since <= rustc
} else {
// By default, a deprecation warning applies to
// the current version of the compiler.
true
}
}
struct Checker<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
/// Result of `TyCtxt::eval_stability`.
pub enum EvalResult {
/// We can use the item because it is stable or we provided the
/// corresponding feature gate.
Allow,
/// We cannot use the item because it is unstable and we did not provide the
/// corresponding feature gate.
Deny {
feature: Symbol,
reason: Option<Symbol>,
issue: u32,
},
/// The item does not have the `#[stable]` or `#[unstable]` marker assigned.
Unmarked,
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
// See issue #38412.
fn skip_stability_check_due_to_privacy(self, mut def_id: DefId) -> bool {
// Check if `def_id` is a trait method.
match self.describe_def(def_id) {
Some(Def::Method(_)) |
Some(Def::AssociatedTy(_)) |
Some(Def::AssociatedConst(_)) => {
if let ty::TraitContainer(trait_def_id) = self.associated_item(def_id).container {
// Trait methods do not declare visibility (even
// for visibility info in cstore). Use containing
// trait instead, so methods of `pub` traits are
// themselves considered `pub`.
def_id = trait_def_id;
}
}
_ => {}
}
let visibility = self.visibility(def_id);
match visibility {
// Must check stability for `pub` items.
ty::Visibility::Public => false,
// These are not visible outside crate; therefore
// stability markers are irrelevant, if even present.
ty::Visibility::Restricted(..) |
ty::Visibility::Invisible => true,
}
}
/// Evaluates the stability of an item.
///
/// Returns `EvalResult::Allow` if the item is stable, or unstable but the corresponding
/// `#![feature]` has been provided. Returns `EvalResult::Deny` which describes the offending
/// unstable feature otherwise.
///
/// If `id` is `Some(_)`, this function will also check if the item at `def_id` has been
/// deprecated. If the item is indeed deprecated, we will emit a deprecation lint attached to
/// `id`.
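    ///
    /// A typical caller then branches on the result, roughly the way
    /// `check_stability` below does (sketch):
    ///
    /// ```ignore
    /// match tcx.eval_stability(def_id, Some(id), span) {
    ///     EvalResult::Allow => { /* ok to use */ }
    ///     EvalResult::Deny { feature, .. } => { /* emit feature-gate error */ }
    ///     EvalResult::Unmarked => { /* missing stability attribute */ }
    /// }
    /// ```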
pub fn eval_stability(self, def_id: DefId, id: Option<HirId>, span: Span) -> EvalResult {
let lint_deprecated = |def_id: DefId,
id: HirId,
note: Option<Symbol>,
suggestion: Option<Symbol>,
message: &str,
lint: &'static Lint| {
if in_derive_expansion(span) {
return;
}
let msg = if let Some(note) = note {
format!("{}: {}", message, note)
} else {
format!("{}", message)
};
let mut diag = self.struct_span_lint_hir(lint, id, span, &msg);
if let Some(suggestion) = suggestion {
if let hir::Node::Expr(_) = self.hir().get_by_hir_id(id) {
diag.span_suggestion(
span,
&msg,
suggestion.to_string(),
Applicability::MachineApplicable,
);
}
}
diag.emit();
if id == hir::DUMMY_HIR_ID {
span_bug!(span, "emitted a {} lint with dummy HIR id: {:?}", lint.name, def_id);
}
};
// Deprecated attributes apply in-crate and cross-crate.
if let Some(id) = id {
if let Some(depr_entry) = self.lookup_deprecation_entry(def_id) {
let parent_def_id = self.hir().local_def_id_from_hir_id(
self.hir().get_parent_item(id));
let skip = self.lookup_deprecation_entry(parent_def_id)
.map_or(false, |parent_depr| parent_depr.same_origin(&depr_entry));
if !skip {
let path = self.def_path_str(def_id);
let message = format!("use of deprecated item '{}'", path);
lint_deprecated(def_id,
id,
depr_entry.attr.note,
None,
&message,
lint::builtin::DEPRECATED);
}
};
}
let is_staged_api = self.lookup_stability(DefId {
index: CRATE_DEF_INDEX,
..def_id
}).is_some();
if !is_staged_api {
return EvalResult::Allow;
}
let stability = self.lookup_stability(def_id);
debug!("stability: \
inspecting def_id={:?} span={:?} of stability={:?}", def_id, span, stability);
if let Some(id) = id {
if let Some(stability) = stability {
if let Some(depr) = &stability.rustc_depr {
let path = self.def_path_str(def_id);
if deprecation_in_effect(&depr.since.as_str()) {
let message = format!("use of deprecated item '{}'", path);
lint_deprecated(def_id,
id,
Some(depr.reason),
depr.suggestion,
&message,
lint::builtin::DEPRECATED);
} else {
let message = format!("use of item '{}' \
that will be deprecated in future version {}",
path,
depr.since);
lint_deprecated(def_id,
id,
Some(depr.reason),
depr.suggestion,
&message,
lint::builtin::DEPRECATED_IN_FUTURE);
}
}
}
}
// Only the cross-crate scenario matters when checking unstable APIs
let cross_crate = !def_id.is_local();
if !cross_crate {
return EvalResult::Allow;
}
// Issue #38412: private items lack stability markers.
if self.skip_stability_check_due_to_privacy(def_id) {
return EvalResult::Allow;
}
match stability {
Some(&Stability { level: attr::Unstable { reason, issue }, feature, .. }) => {
if span.allows_unstable(&feature.as_str()) {
debug!("stability: skipping span={:?} since it is internal", span);
return EvalResult::Allow;
}
if self.stability().active_features.contains(&feature) {
return EvalResult::Allow;
}
// When we're compiling the compiler itself we may pull in
// crates from crates.io, but those crates may depend on other
// crates also pulled in from crates.io. We want to ideally be
// able to compile everything without requiring upstream
// modifications, so in the case that this looks like a
// `rustc_private` crate (e.g., a compiler crate) and we also have
// the `-Z force-unstable-if-unmarked` flag present (we're
// compiling a compiler crate), then let this missing feature
// annotation slide.
if feature == "rustc_private" && issue == 27812 {
if self.sess.opts.debugging_opts.force_unstable_if_unmarked {
return EvalResult::Allow;
}
}
EvalResult::Deny { feature, reason, issue }
}
Some(_) => {
// Stable APIs are always ok to call and deprecated APIs are
// handled by the lint emitting logic above.
EvalResult::Allow
}
None => {
EvalResult::Unmarked
}
}
}
/// Checks if an item is stable or error out.
///
/// If the item defined by `def_id` is unstable and the corresponding `#![feature]` does not
/// exist, emits an error.
///
/// Additionally, this function will also check if the item is deprecated. If so, and `id` is
/// not `None`, a deprecated lint attached to `id` will be emitted.
pub fn check_stability(self, def_id: DefId, id: Option<HirId>, span: Span) {
match self.eval_stability(def_id, id, span) {
EvalResult::Allow => {}
EvalResult::Deny { feature, reason, issue } => {
let msg = match reason {
Some(r) => format!("use of unstable library feature '{}': {}", feature, r),
None => format!("use of unstable library feature '{}'", &feature)
};
let msp: MultiSpan = span.into();
let cm = &self.sess.parse_sess.source_map();
let span_key = msp.primary_span().and_then(|sp: Span|
if !sp.is_dummy() {
let file = cm.lookup_char_pos(sp.lo()).file;
if file.name.is_macros() {
None
} else {
Some(span)
}
} else {
None
}
);
let error_id = (DiagnosticMessageId::StabilityId(issue), span_key, msg.clone());
let fresh = self.sess.one_time_diagnostics.borrow_mut().insert(error_id);
if fresh {
emit_feature_err(&self.sess.parse_sess, &feature.as_str(), span,
GateIssue::Library(Some(issue)), &msg);
}
}
EvalResult::Unmarked => {
// The API could be uncallable for other reasons, for example when a private module
// was referenced.
self.sess.delay_span_bug(span, &format!("encountered unmarked API: {:?}", def_id));
}
}
}
}
impl<'a, 'tcx> Visitor<'tcx> for Checker<'a, 'tcx> {
/// Because stability levels are scoped lexically, we want to walk
/// nested items in the context of the outer item, so enable
/// deep-walking.
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::OnlyBodies(&self.tcx.hir())
}
fn visit_item(&mut self, item: &'tcx hir::Item) {
match item.node {
hir::ItemKind::ExternCrate(_) => {
// compiler-generated `extern crate` items have a dummy span.
if item.span.is_dummy() { return }
let def_id = self.tcx.hir().local_def_id_from_hir_id(item.hir_id);
let cnum = match self.tcx.extern_mod_stmt_cnum(def_id) {
Some(cnum) => cnum,
None => return,
};
let def_id = DefId { krate: cnum, index: CRATE_DEF_INDEX };
self.tcx.check_stability(def_id, Some(item.hir_id), item.span);
}
// For implementations of traits, check the stability of each item
// individually as it's possible to have a stable trait with unstable
// items.
hir::ItemKind::Impl(.., Some(ref t), _, ref impl_item_refs) => {
if let Def::Trait(trait_did) = t.path.def {
for impl_item_ref in impl_item_refs {
let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
let trait_item_def_id = self.tcx.associated_items(trait_did)
.find(|item| item.ident.name == impl_item.ident.name)
.map(|item| item.def_id);
if let Some(def_id) = trait_item_def_id {
// Pass `None` to skip deprecation warnings.
self.tcx.check_stability(def_id, None, impl_item.span);
}
}
}
}
// There's no good place to insert stability check for non-Copy unions,
// so semi-randomly perform it here in stability.rs
hir::ItemKind::Union(..) if !self.tcx.features().untagged_unions => {
let def_id = self.tcx.hir().local_def_id_from_hir_id(item.hir_id);
let adt_def = self.tcx.adt_def(def_id);
let ty = self.tcx.type_of(def_id);
if adt_def.has_dtor(self.tcx) {
emit_feature_err(&self.tcx.sess.parse_sess,
"untagged_unions", item.span, GateIssue::Language,
"unions with `Drop` implementations are unstable");
} else {
let param_env = self.tcx.param_env(def_id);
if !param_env.can_type_implement_copy(self.tcx, ty).is_ok() {
emit_feature_err(&self.tcx.sess.parse_sess,
"untagged_unions", item.span, GateIssue::Language,
"unions with non-`Copy` fields are unstable");
}
}
}
_ => (/* pass */)
}
intravisit::walk_item(self, item);
}
fn visit_path(&mut self, path: &'tcx hir::Path, id: hir::HirId) {
if let Some(def_id) = path.def.opt_def_id() {
self.tcx.check_stability(def_id, Some(id), path.span)
}
intravisit::walk_path(self, path)
}
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
pub fn lookup_deprecation(self, id: DefId) -> Option<Deprecation> {
self.lookup_deprecation_entry(id).map(|depr| depr.attr)
}
}
/// Given the list of enabled features that were not language features (i.e., that
/// were expected to be library features), and the list of features used from
/// libraries, identify activated features that don't exist and error about them.
pub fn check_unused_or_stable_features<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let access_levels = &tcx.privacy_access_levels(LOCAL_CRATE);
if tcx.stability().staged_api[&LOCAL_CRATE] {
let krate = tcx.hir().krate();
let mut missing = MissingStabilityAnnotations {
tcx,
access_levels,
};
missing.check_missing_stability(hir::CRATE_HIR_ID, krate.span, "crate");
intravisit::walk_crate(&mut missing, krate);
krate.visit_all_item_likes(&mut missing.as_deep_visitor());
}
let declared_lang_features = &tcx.features().declared_lang_features;
let mut lang_features = FxHashSet::default();
for &(feature, span, since) in declared_lang_features {
if let Some(since) = since {
// Warn if the user has enabled an already-stable lang feature.
unnecessary_stable_feature_lint(tcx, span, feature, since);
}
if lang_features.contains(&feature) {
// Warn if the user enables a lang feature multiple times.
duplicate_feature_err(tcx.sess, span, feature);
}
lang_features.insert(feature);
}
let declared_lib_features = &tcx.features().declared_lib_features;
let mut remaining_lib_features = FxHashMap::default();
for (feature, span) in declared_lib_features {
if remaining_lib_features.contains_key(&feature) {
// Warn if the user enables a lib feature multiple times.
duplicate_feature_err(tcx.sess, *span, *feature);
}
remaining_lib_features.insert(feature, span.clone());
}
// `stdbuild` has special handling for `libc`, so we need to
// recognise the feature when building std.
// Likewise, libtest is handled specially, so `test` isn't
// available as we'd like it to be.
// FIXME: only remove `libc` when `stdbuild` is active.
// FIXME: remove special casing for `test`.
remaining_lib_features.remove(&Symbol::intern("libc"));
remaining_lib_features.remove(&Symbol::intern("test"));
let check_features =
|remaining_lib_features: &mut FxHashMap<_, _>, defined_features: &Vec<_>| {
for &(feature, since) in defined_features {
if let Some(since) = since {
if let Some(span) = remaining_lib_features.get(&feature) {
// Warn if the user has enabled an already-stable lib feature.
unnecessary_stable_feature_lint(tcx, *span, feature, since);
}
}
remaining_lib_features.remove(&feature);
if remaining_lib_features.is_empty() {
break;
}
}
};
// We always collect the lib features declared in the current crate, even if there are
// no unknown features, because the collection also does feature attribute validation.
let local_defined_features = tcx.lib_features().to_vec();
if !remaining_lib_features.is_empty() {
check_features(&mut remaining_lib_features, &local_defined_features);
for &cnum in &*tcx.crates() {
if remaining_lib_features.is_empty() {
break;
}
check_features(&mut remaining_lib_features, &tcx.defined_lib_features(cnum));
}
}
for (feature, span) in remaining_lib_features {
struct_span_err!(tcx.sess, span, E0635, "unknown feature `{}`", feature).emit();
}
// FIXME(#44232): the `used_features` table no longer exists, so we
// don't lint about unused features. We should reenable this one day!
}
fn unnecessary_stable_feature_lint<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
span: Span,
feature: Symbol,
since: Symbol
) {
tcx.lint_hir(lint::builtin::STABLE_FEATURES,
hir::CRATE_HIR_ID,
span,
&format!("the feature `{}` has been stable since {} and no longer requires \
an attribute to enable", feature, since));
}
fn duplicate_feature_err(sess: &Session, span: Span, feature: Symbol) {
struct_span_err!(sess, span, E0636, "the feature `{}` has already been declared", feature)
.emit();
}
| 41.365488 | 99 | 0.542571 |
112b8b406c7ec03b8b3bff01cf3f94332d2bb365 | 77 | use std::io;
pub(super) fn run() -> io::Result<()> {
unimplemented!()
}
| 12.833333 | 39 | 0.545455 |
7a10ac7004e7d865267a9ba8d61af3febf31550e | 2,454 | use std::fs::File;
use std::fs::OpenOptions;
use std::io::Write as _;
use std::path::Path;
use std::process::Command;
use std::process::Stdio;
type LocalDateTime = chrono::DateTime<chrono::Local>;
pub fn makeNewStagedFile(filePath: &Path, content: &str, repositoryDir: &Path)
{
makeNewUnstagedFile(filePath, content, repositoryDir);
stageFile(filePath, repositoryDir);
}
pub fn makeCommit(message: &str, repositoryDir: &Path)
{
let status = Command::new("git").args(&["commit", "-m", message])
.current_dir(&repositoryDir).stdout(Stdio::null()).status().unwrap();
assert_eq!(true, status.success(),
"Failed to create a commit with message \"{}\", command finished with {}", message, status);
}
pub fn findLastCommitDateForLogView(repoDir: &Path) -> String
{
// for date formatting below, see https://docs.rs/chrono/0.4.19/chrono/format/strftime/index.html
findLastCommitDate(repoDir).format("%_d %b %Y %_H:%M:%S").to_string()
}
// private
fn makeNewUnstagedFile(filePath: &Path, content: &str, repositoryDir: &Path)
{
let mut file = makeNewWritableFile(&repositoryDir.join(filePath));
    file.write_all(content.as_bytes()).unwrap();
}
fn makeNewWritableFile(filePath: &Path) -> File
{
OpenOptions::new().write(true).create_new(true).open(filePath).unwrap()
}
fn stageFile(filePath: &Path, repositoryDir: &Path)
{
let status = Command::new("git").args(&["add", filePath.to_str().unwrap()])
.current_dir(&repositoryDir).status().unwrap();
assert_eq!(true, status.success(),
"Failed to stage file \"{:?}\", command finished with {}", filePath, status);
}
fn findLastCommitDate(repoDir: &Path) -> LocalDateTime
{
// --format=%cD means output contains only a commit date in RFC2822 format
// see https://git-scm.com/docs/git-log#Documentation/git-log.txt-emcdem
let output = getCommandStdoutString(&["git", "log", "-1", "--format=%cD"], repoDir).trim_end().to_owned();
chrono::DateTime::parse_from_rfc2822(&output).unwrap().into()
}
fn getCommandStdoutString(commandParts: &[&str], repositoryDir: &Path) -> String
{
String::from_utf8(getCommandResults(commandParts, repositoryDir).stdout).unwrap()
}
fn getCommandResults(commandParts: &[&str], repositoryDir: &Path) -> std::process::Output
{
let mut command = Command::new(commandParts[0]);
command.args(&commandParts[1..]).current_dir(&repositoryDir);
command.output().unwrap()
}
| 34.083333 | 110 | 0.689894 |
f8156c1bc5a2ed94946428a124f49280af41795e | 3,957 | mod utils;
mod backend;
use wasm_bindgen::prelude::*;
use web_sys::{Document, Element, Window};
use feeder_types::*;
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
const NAME: &str = "Feeder";
type JsResult<T> = Result<T, JsValue>;
#[wasm_bindgen]
pub async fn start() -> JsResult<()> {
utils::set_panic_hook();
let window = web_sys::window().expect("no global `window` exists");
let document = window.document().expect("should have a document on window");
let body = document.body().expect("document should have a body");
class(&body, "bg-gray-200")?;
let navbar = make_navbar(&document)?;
body.append_child(&navbar)?;
let content = make_content(&document, &window).await?;
body.append_child(&content)?;
Ok(())
}
async fn make_content(doc: &Document, window: &Window) -> JsResult<Element> {
let container = doc.create_element("div")?;
class(&container, "container mx-auto")?;
let heading = doc.create_element("h1")?;
heading.set_text_content(Some("All Feeds"));
class(&heading, "text-4xl my-10 mx-5 font-bold")?;
container.append_child(&heading)?;
let entries = backend::update(&window).await?;
let entries_list = make_entry_list(&doc, &entries)?;
container.append_child(&entries_list)?;
Ok(container)
}
fn make_entry_list(doc: &Document, entries: &[Entry]) -> JsResult<Element> {
let entry_list = doc.create_element("ul")?;
for entry in entries {
let entry_elem = make_entry(&doc, &entry)?;
entry_list.append_child(&entry_elem)?;
}
Ok(entry_list)
}
fn make_entry(doc: &Document, entry: &Entry) -> JsResult<Element> {
let entry_container = doc.create_element("li")?;
let entry_card = doc.create_element("div")?;
let border = if entry.new { "border-2 border-red-300" } else { "" };
let card_class = format!("{} {}",
"p-6 bg-white shadow-lg rounded-lg my-10",
border);
class(&entry_card,
&card_class)?;
let title = doc.create_element("a")?;
title.set_attribute("href", &entry.link)?;
title.set_text_content(Some(&entry.title));
class(&title, "text-gray-800 text-base font-semibold")?;
entry_card.append_child(&title)?;
let summary = doc.create_element("p")?;
summary.set_text_content(Some(&entry.summary));
class(&summary, "mt-2 text-gray-600 text-sm")?;
entry_card.append_child(&summary)?;
let details_class = "text-sm font-medium text-gray-400 px-2";
let source_container = doc.create_element("div")?;
class(&source_container,
"flex justify-end mt-2")?;
let source_text = doc.create_element("p")?;
class(&source_text, details_class)?;
source_text.set_text_content(Some(&entry.source));
let date = doc.create_element("p")?;
class(&date, details_class)?;
date.set_text_content(Some("12.12.2021 14:55"));
source_container.append_child(&source_text)?;
source_container.append_child(&date)?;
entry_card.append_child(&source_container)?;
entry_container.append_child(&entry_card)?;
Ok(entry_container)
}
fn make_navbar(doc: &Document) -> JsResult<Element> {
let nav = doc.create_element("nav")?;
class(&nav, "bg-black")?;
let container = doc.create_element("div")?;
class(&container, "max-w-7xl mx-auto px-2 sm:px-6 lg:px-8")?;
let text = doc.create_element("h1")?;
class(&text, "text-lg text-white p-2")?;
text.set_text_content(Some(NAME));
container.append_child(&text)?;
nav.append_child(&container)?;
Ok(nav)
}
fn class(elem: &Element, class: &str) -> JsResult<()> {
elem.set_attribute("class", class)
}
| 31.404762 | 80 | 0.666161 |
ef8f6b46742010e0338df5199f6a502df64c5af6 | 703 | /*
* @Date: 2021-10-02 08:34:24
* @Author: Mengsen Wang
* @LastEditors: Mengsen Wang
* @LastEditTime: 2021-10-02 09:05:13
*/
struct Solution;
impl Solution {
pub fn to_hex(num: i32) -> String {
if num == 0 {
return String::from("0");
}
        // Reinterpret as u32 so a negative input keeps its two's-complement
        // bit pattern (e.g. -1 becomes 0xffffffff).
        let mut num = num as u32;
        let mut ans = String::new();
        let s = "0123456789abcdef".chars().collect::<Vec<_>>();
        // Peel off the lowest nibble each round and prepend its hex digit.
        while num != 0 {
ans = s[(num & 0xf) as usize].to_string() + &ans;
num >>= 4;
}
ans
}
}
fn main() {
assert_eq!(Solution::to_hex(26), "1a");
assert_eq!(Solution::to_hex(-1), "ffffffff");
assert_eq!(Solution::to_hex(16), "10");
}
| 22.677419 | 63 | 0.516358 |
72226cb8cacff4b999b6f64e8ec941482ba30776 | 121 | pub mod address;
pub mod collection;
pub mod consts;
pub mod error;
pub mod mapping;
pub mod sequence;
pub mod variable;
| 15.125 | 19 | 0.768595 |
1c40f22c158bce66679af6a1b83cc2e679edb9d9 | 3,264 | use casper_execution_engine::core::engine_state::ExecutableDeployItem;
use casper_types::{bytesrepr::ToBytes, ContractPackageHash, RuntimeArgs};
use crate::error::Result;
/// Extension trait for `ExecutableDeployItem`, containing convenience constructors.
pub trait ExecutableDeployItemExt {
/// Creates an `ExecutableDeployItem::StoredContractByName`.
fn new_stored_contract_by_name(
name: String,
entry_point: String,
args: RuntimeArgs,
) -> Result<ExecutableDeployItem>;
/// Creates an `ExecutableDeployItem::StoredContractByHash`.
fn new_stored_contract_by_hash(
hash: ContractPackageHash,
entry_point: String,
args: RuntimeArgs,
) -> Result<ExecutableDeployItem>;
/// Creates an `ExecutableDeployItem::StoredVersionedContractByName`.
fn new_stored_versioned_contract_by_name(
name: String,
version: Option<u32>,
entry_point: String,
args: RuntimeArgs,
) -> Result<ExecutableDeployItem>;
/// Creates an `ExecutableDeployItem::StoredVersionedContractByHash`.
fn new_stored_versioned_contract_by_hash(
hash: ContractPackageHash,
version: Option<u32>,
entry_point: String,
args: RuntimeArgs,
) -> Result<ExecutableDeployItem>;
/// Creates an `ExecutableDeployItem::ModuleBytes`.
fn new_module_bytes(module_bytes: Vec<u8>, args: RuntimeArgs) -> Result<ExecutableDeployItem>;
}
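// A minimal usage sketch (the contract name, entry point, and empty runtime
// args below are hypothetical, purely for illustration):
//
//     let item = ExecutableDeployItem::new_stored_contract_by_name(
//         "my_contract".to_string(),
//         "do_something".to_string(),
//         RuntimeArgs::new(),
//     )?;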
impl ExecutableDeployItemExt for ExecutableDeployItem {
fn new_stored_contract_by_name(
name: String,
entry_point: String,
args: RuntimeArgs,
) -> Result<ExecutableDeployItem> {
Ok(ExecutableDeployItem::StoredContractByName {
name,
entry_point,
args: args.to_bytes()?.into(),
})
}
fn new_stored_contract_by_hash(
hash: ContractPackageHash,
entry_point: String,
args: RuntimeArgs,
) -> Result<ExecutableDeployItem> {
Ok(ExecutableDeployItem::StoredContractByHash {
hash,
entry_point,
args: args.to_bytes()?.into(),
})
}
fn new_stored_versioned_contract_by_name(
name: String,
version: Option<u32>,
entry_point: String,
args: RuntimeArgs,
) -> Result<ExecutableDeployItem> {
Ok(ExecutableDeployItem::StoredVersionedContractByName {
name,
version, // defaults to highest enabled version
entry_point,
args: args.to_bytes()?.into(),
})
}
fn new_stored_versioned_contract_by_hash(
hash: ContractPackageHash,
version: Option<u32>,
entry_point: String,
args: RuntimeArgs,
) -> Result<ExecutableDeployItem> {
Ok(ExecutableDeployItem::StoredVersionedContractByHash {
hash,
version, // defaults to highest enabled version
entry_point,
args: args.to_bytes()?.into(),
})
}
fn new_module_bytes(module_bytes: Vec<u8>, args: RuntimeArgs) -> Result<ExecutableDeployItem> {
Ok(ExecutableDeployItem::ModuleBytes {
module_bytes: module_bytes.into(),
args: args.to_bytes()?.into(),
})
}
}
| 32 | 99 | 0.647672 |
50a670b87723826eacd976d93747d085f243f894 | 2,462 | // Test cases where a changing struct appears in the signature of fns
// and methods.
// compile-flags: -Z query-dep-graph -C incremental=tmp/dep-graph-struct-signature
#![feature(rustc_attrs)]
#![allow(dead_code)]
#![allow(unused_variables)]
fn main() { }
#[rustc_if_this_changed]
struct WillChange {
x: u32,
y: u32
}
struct WontChange {
x: u32,
y: u32
}
// these are valid dependencies
mod signatures {
use WillChange;
#[rustc_then_this_would_need(type_of)] //~ ERROR no path
#[rustc_then_this_would_need(associated_item)] //~ ERROR no path
#[rustc_then_this_would_need(trait_def)] //~ ERROR no path
trait Bar {
#[rustc_then_this_would_need(fn_sig)] //~ ERROR OK
fn do_something(x: WillChange);
}
#[rustc_then_this_would_need(fn_sig)] //~ ERROR OK
#[rustc_then_this_would_need(typeck)] //~ ERROR OK
fn some_fn(x: WillChange) { }
#[rustc_then_this_would_need(fn_sig)] //~ ERROR OK
#[rustc_then_this_would_need(typeck)] //~ ERROR OK
fn new_foo(x: u32, y: u32) -> WillChange {
WillChange { x: x, y: y }
}
#[rustc_then_this_would_need(type_of)] //~ ERROR OK
impl WillChange {
#[rustc_then_this_would_need(fn_sig)] //~ ERROR OK
#[rustc_then_this_would_need(typeck)] //~ ERROR OK
fn new(x: u32, y: u32) -> WillChange { loop { } }
}
#[rustc_then_this_would_need(type_of)] //~ ERROR OK
impl WillChange {
#[rustc_then_this_would_need(fn_sig)] //~ ERROR OK
#[rustc_then_this_would_need(typeck)] //~ ERROR OK
fn method(&self, x: u32) { }
}
struct WillChanges {
#[rustc_then_this_would_need(type_of)] //~ ERROR OK
x: WillChange,
#[rustc_then_this_would_need(type_of)] //~ ERROR OK
y: WillChange
}
// The fields change, not the type itself.
#[rustc_then_this_would_need(type_of)] //~ ERROR no path
fn indirect(x: WillChanges) { }
}
mod invalid_signatures {
use WontChange;
#[rustc_then_this_would_need(type_of)] //~ ERROR no path
trait A {
#[rustc_then_this_would_need(fn_sig)] //~ ERROR no path
fn do_something_else_twice(x: WontChange);
}
#[rustc_then_this_would_need(fn_sig)] //~ ERROR no path
fn b(x: WontChange) { }
#[rustc_then_this_would_need(fn_sig)] //~ ERROR no path from `WillChange`
#[rustc_then_this_would_need(typeck)] //~ ERROR no path from `WillChange`
fn c(x: u32) { }
}
| 28.298851 | 82 | 0.651097 |
144529d8641eee4c56bb02f00b1b0fcedae6e18c | 8,338 | //! Tidy check to enforce rules about platform-specific code in std.
//!
//! This is intended to maintain existing standards of code
//! organization in hopes that the standard library will continue to
//! be refactored to isolate platform-specific bits, making porting
//! easier; where "standard library" roughly means "all the
//! dependencies of the std and test crates".
//!
//! This generally means placing restrictions on where `cfg(unix)`,
//! `cfg(windows)`, `cfg(target_os)` and `cfg(target_env)` may appear,
//! the basic objective being to isolate platform-specific code to the
//! platform-specific `std::sys` modules, and to the allocation,
//! unwinding, and libc crates.
//!
//! Following are the basic rules, though there are currently
//! exceptions:
//!
//! - core may not have platform-specific code.
//! - libpanic_abort may have platform-specific code.
//! - libpanic_unwind may have platform-specific code.
//! - libunwind may have platform-specific code.
//! - other crates in the std facade may not.
//! - std may have platform-specific code in the following places:
//! - `sys/unix/`
//! - `sys/windows/`
//! - `os/`
//!
//! `std/sys_common` should _not_ contain platform-specific code.
//! Finally, because std contains tests with platform-specific
//! `ignore` attributes, once the parser encounters `mod tests`,
//! platform-specific cfgs are allowed. Not sure yet how to deal with
//! this in the long term.
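//!
//! For example, a `cfg(target_os = "linux")` in `library/alloc` would be
//! flagged by this check, while the same cfg under `library/std/src/sys/`
//! passes because that path is listed in `EXCEPTION_PATHS` below.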
use std::iter::Iterator;
use std::path::Path;
// Paths that may contain platform-specific code.
const EXCEPTION_PATHS: &[&str] = &[
// std crates
"library/panic_abort",
"library/panic_unwind",
"library/unwind",
"library/std/src/sys/", // Platform-specific code for std lives here.
// This has the trailing slash so that sys_common is not excepted.
"library/std/src/os", // Platform-specific public interfaces
"library/rtstartup", // Not sure what to do about this. magic stuff for mingw
// Integration test for platform-specific run-time feature detection:
"library/std/tests/run-time-detect.rs",
"library/std/src/net/test.rs",
"library/std/src/net/addr",
"library/std/src/net/udp",
"library/std/src/sys_common/remutex.rs",
"library/std/src/sync/mutex.rs",
"library/std/src/sync/rwlock.rs",
"library/term", // Not sure how to make this crate portable, but test crate needs it.
"library/test", // Probably should defer to unstable `std::sys` APIs.
// std testing crates, okay for now at least
"library/core/tests",
"library/alloc/tests/lib.rs",
"library/alloc/benches/lib.rs",
// The `VaList` implementation must have platform specific code.
// The Windows implementation of a `va_list` is always a character
// pointer regardless of the target architecture. As a result,
// we must use `#[cfg(windows)]` to conditionally compile the
// correct `VaList` structure for windows.
"library/core/src/ffi.rs",
];
pub fn check(path: &Path, bad: &mut bool) {
// Sanity check that the complex parsing here works.
let mut saw_target_arch = false;
let mut saw_cfg_bang = false;
super::walk(path, &mut super::filter_dirs, &mut |entry, contents| {
let file = entry.path();
let filestr = file.to_string_lossy().replace("\\", "/");
if !filestr.ends_with(".rs") {
return;
}
let is_exception_path = EXCEPTION_PATHS.iter().any(|s| filestr.contains(&**s));
if is_exception_path {
return;
}
check_cfgs(contents, &file, bad, &mut saw_target_arch, &mut saw_cfg_bang);
});
assert!(saw_target_arch);
assert!(saw_cfg_bang);
}
fn check_cfgs(
contents: &str,
file: &Path,
bad: &mut bool,
saw_target_arch: &mut bool,
saw_cfg_bang: &mut bool,
) {
// For now it's ok to have platform-specific code after 'mod tests'.
let mod_tests_idx = find_test_mod(contents);
let contents = &contents[..mod_tests_idx];
// Pull out all `cfg(...)` and `cfg!(...)` strings.
let cfgs = parse_cfgs(contents);
let mut line_numbers: Option<Vec<usize>> = None;
let mut err = |idx: usize, cfg: &str| {
if line_numbers.is_none() {
line_numbers = Some(contents.match_indices('\n').map(|(i, _)| i).collect());
}
        let line_numbers = line_numbers.as_ref().expect("just initialized above");
let line = match line_numbers.binary_search(&idx) {
Ok(_) => unreachable!(),
Err(i) => i + 1,
};
tidy_error!(bad, "{}:{}: platform-specific cfg: {}", file.display(), line, cfg);
};
for (idx, cfg) in cfgs {
// Sanity check that the parsing here works.
if !*saw_target_arch && cfg.contains("target_arch") {
*saw_target_arch = true
}
if !*saw_cfg_bang && cfg.contains("cfg!") {
*saw_cfg_bang = true
}
let contains_platform_specific_cfg = cfg.contains("target_os")
|| cfg.contains("target_env")
|| cfg.contains("target_vendor")
|| cfg.contains("unix")
|| cfg.contains("windows");
if !contains_platform_specific_cfg {
continue;
}
        let preceded_by_doc_comment = {
let pre_contents = &contents[..idx];
let pre_newline = pre_contents.rfind('\n');
let pre_doc_comment = pre_contents.rfind("///");
match (pre_newline, pre_doc_comment) {
(Some(n), Some(c)) => n < c,
(None, Some(_)) => true,
(_, None) => false,
}
};
        if preceded_by_doc_comment {
continue;
}
err(idx, cfg);
}
}
fn find_test_mod(contents: &str) -> usize {
if let Some(mod_tests_idx) = contents.find("mod tests") {
// Also capture a previous line indicating that "mod tests" is cfg'd out.
let prev_newline_idx = contents[..mod_tests_idx].rfind('\n').unwrap_or(mod_tests_idx);
let prev_newline_idx = contents[..prev_newline_idx].rfind('\n');
if let Some(nl) = prev_newline_idx {
let prev_line = &contents[nl + 1..mod_tests_idx];
if prev_line.contains("cfg(all(test, not(target_os")
|| prev_line.contains("cfg(all(test, not(any(target_os")
{
nl
} else {
mod_tests_idx
}
} else {
mod_tests_idx
}
} else {
contents.len()
}
}
fn parse_cfgs<'a>(contents: &'a str) -> Vec<(usize, &'a str)> {
let candidate_cfgs = contents.match_indices("cfg");
let candidate_cfg_idxs = candidate_cfgs.map(|(i, _)| i);
    // This is pulling out the indexes of all "cfg" strings
    // that appear to be tokens followed by a parenthesis.
let cfgs = candidate_cfg_idxs.filter(|i| {
        // Look at the byte just before "cfg" (saturating so index 0 stays 0).
        let pre_idx = i.saturating_sub(1);
let succeeds_non_ident = !contents
.as_bytes()
.get(pre_idx)
.cloned()
.map(char::from)
.map(char::is_alphanumeric)
.unwrap_or(false);
let contents_after = &contents[*i..];
let first_paren = contents_after.find('(');
let paren_idx = first_paren.map(|ip| i + ip);
        let precedes_whitespace_and_paren = paren_idx
.map(|ip| {
let maybe_space = &contents[*i + "cfg".len()..ip];
maybe_space.chars().all(|c| char::is_whitespace(c) || c == '!')
})
.unwrap_or(false);
        succeeds_non_ident && precedes_whitespace_and_paren
});
cfgs.flat_map(|i| {
let mut depth = 0;
let contents_from = &contents[i..];
for (j, byte) in contents_from.bytes().enumerate() {
match byte {
b'(' => {
depth += 1;
}
b')' => {
depth -= 1;
if depth == 0 {
return Some((i, &contents_from[..=j]));
}
}
_ => {}
}
}
// if the parentheses are unbalanced just ignore this cfg -- it'll be caught when attempting
// to run the compiler, and there's no real reason to lint it separately here
None
})
.collect()
}
| 36.41048 | 100 | 0.592948 |
18b7b98cb8218cce2b532370c0bfd5acac7b5a85 | 11,060 | use sp_runtime::traits::{Convert, Saturating};
use sp_runtime::{Fixed64, Perbill};
use frame_support::{traits::{OnUnbalanced, Currency, Get}, weights::Weight};
use crate::{Balances, Balance, System, Authorship, MaximumBlockWeight, NegativeImbalance};
pub struct Author;
impl OnUnbalanced<NegativeImbalance> for Author {
fn on_nonzero_unbalanced(amount: NegativeImbalance) {
Balances::resolve_creating(&Authorship::author(), amount);
}
}
/// Struct that handles the conversion of Balance -> `u64`. This is used for staking's election
/// calculation.
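///
/// For example (a sketch): if total issuance were `3 * u64::MAX`, `factor()`
/// would be 3, `convert` would divide balances by 3 on the way into `u64`, and
/// the reverse conversion multiplies by 3 again, so round-trips are exact only
/// up to that truncation.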
pub struct CurrencyToVoteHandler;
impl CurrencyToVoteHandler {
fn factor() -> Balance { (Balances::total_issuance() / u64::max_value() as Balance).max(1) }
}
impl Convert<Balance, u64> for CurrencyToVoteHandler {
fn convert(x: Balance) -> u64 { (x / Self::factor()) as u64 }
}
impl Convert<u128, Balance> for CurrencyToVoteHandler {
fn convert(x: u128) -> Balance { x * Self::factor() }
}
/// Convert from weight to balance via a simple coefficient multiplication
/// The associated type C encapsulates a constant in units of balance per weight
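///
/// For example (a sketch): with `C::get() == 100` units of balance per unit of
/// weight, a weight of `10_000` converts to a fee of `1_000_000`.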
pub struct LinearWeightToFee<C>(sp_std::marker::PhantomData<C>);
impl<C: Get<Balance>> Convert<Weight, Balance> for LinearWeightToFee<C> {
fn convert(w: Weight) -> Balance {
        // In substrate-node, a weight of 10_000 (the smallest non-zero weight) is
        // mapped to 10^7 units of fees; hence:
let coefficient = C::get();
Balance::from(w).saturating_mul(coefficient)
}
}
/// Update the given multiplier based on the following formula
///
/// diff = (previous_block_weight - target_weight) / max_weight
/// v = 0.00004
/// next_multiplier = prev_multiplier + (v . diff) + (v . diff)^2 / 2
///
/// Where `target_weight` must be given as the `Get` implementation of the `T` generic type.
/// https://research.web3.foundation/en/latest/polkadot/Token%20Economics/#relay-chain-transaction-fees
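///
/// Worked example (a sketch, assuming the configured target is 25% of
/// `max_weight`): a block that is 50% full gives `diff = 0.25`, so the
/// multiplier grows by roughly `v * diff = 0.00004 * 0.25 = 0.00001`, i.e. by
/// 10_000 parts per billion, matching `feemul(10000)` in the
/// `stateless_weight_mul` test below.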
pub struct TargetedFeeAdjustment<T>(sp_std::marker::PhantomData<T>);
impl<T: Get<Perbill>> Convert<Fixed64, Fixed64> for TargetedFeeAdjustment<T> {
fn convert(multiplier: Fixed64) -> Fixed64 {
let block_weight = System::all_extrinsics_weight();
let max_weight = MaximumBlockWeight::get();
let target_weight = (T::get() * max_weight) as u128;
let block_weight = block_weight as u128;
// determines if the first_term is positive
let positive = block_weight >= target_weight;
let diff_abs = block_weight.max(target_weight) - block_weight.min(target_weight);
// diff is within u32, safe.
let diff = Fixed64::from_rational(diff_abs as i64, max_weight as u64);
let diff_squared = diff.saturating_mul(diff);
// 0.00004 = 4/100_000 = 40_000/10^9
let v = Fixed64::from_rational(4, 100_000);
// 0.00004^2 = 16/10^10 ~= 2/10^9. Taking the future /2 into account, then it is just 1
// parts from a billionth.
let v_squared_2 = Fixed64::from_rational(1, 1_000_000_000);
let first_term = v.saturating_mul(diff);
// It is very unlikely that this will exist (in our poor perbill estimate) but we are giving
// it a shot.
let second_term = v_squared_2.saturating_mul(diff_squared);
if positive {
// Note: this is merely bounded by how big the multiplier and the inner value can go,
// not by any economical reasoning.
let excess = first_term.saturating_add(second_term);
multiplier.saturating_add(excess)
} else {
// Proof: first_term > second_term. Safe subtraction.
let negative = first_term - second_term;
multiplier.saturating_sub(negative)
// despite the fact that apply_to saturates weight (final fee cannot go below 0)
// it is crucially important to stop here and don't further reduce the weight fee
// multiplier. While at -1, it means that the network is so un-congested that all
// transactions have no weight fee. We stop here and only increase if the network
// became more busy.
.max(Fixed64::from_rational(-1, 1))
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use sp_runtime::assert_eq_error_rate;
use crate::{MaximumBlockWeight, AvailableBlockRatio, Runtime};
use crate::{constants::currency::*, TransactionPayment, TargetBlockFullness};
use frame_support::weights::Weight;
fn max() -> Weight {
MaximumBlockWeight::get()
}
fn target() -> Weight {
TargetBlockFullness::get() * max()
}
// poc reference implementation.
fn fee_multiplier_update(block_weight: Weight, previous: Fixed64) -> Fixed64 {
let block_weight = block_weight as f32;
let v: f32 = 0.00004;
// maximum tx weight
let m = max() as f32;
// Ideal saturation in terms of weight
let ss = target() as f32;
// Current saturation in terms of weight
let s = block_weight;
let fm = v * (s/m - ss/m) + v.powi(2) * (s/m - ss/m).powi(2) / 2.0;
let addition_fm = Fixed64::from_parts((fm * 1_000_000_000_f32).round() as i64);
previous.saturating_add(addition_fm)
}
fn feemul(parts: i64) -> Fixed64 {
Fixed64::from_parts(parts)
}
fn run_with_system_weight<F>(w: Weight, assertions: F) where F: Fn() -> () {
let mut t: sp_io::TestExternalities =
system::GenesisConfig::default().build_storage::<Runtime>().unwrap().into();
t.execute_with(|| {
System::set_block_limits(w, 0);
assertions()
});
}
#[test]
fn fee_multiplier_update_poc_works() {
let fm = Fixed64::from_rational(0, 1);
let test_set = vec![
(0, fm.clone()),
(100, fm.clone()),
(target(), fm.clone()),
(max() / 2, fm.clone()),
(max(), fm.clone()),
];
test_set.into_iter().for_each(|(w, fm)| {
run_with_system_weight(w, || {
assert_eq_error_rate!(
fee_multiplier_update(w, fm).into_inner(),
TargetedFeeAdjustment::<TargetBlockFullness>::convert(fm).into_inner(),
5,
);
})
})
}
#[test]
fn empty_chain_simulation() {
// just a few txs per_block.
let block_weight = 0;
run_with_system_weight(block_weight, || {
let mut fm = Fixed64::default();
let mut iterations: u64 = 0;
loop {
let next = TargetedFeeAdjustment::<TargetBlockFullness>::convert(fm);
fm = next;
if fm == Fixed64::from_rational(-1, 1) { break; }
iterations += 1;
}
println!("iteration {}, new fm = {:?}. Weight fee is now zero", iterations, fm);
assert!(iterations > 50_000, "This assertion is just a warning; Don't panic. \
Current Substrate/Polkadot nodes are configured with a _slow adjusting fee_ \
mechanism. Hence, it is really unlikely that fees collapse to zero even on an \
empty chain in fewer than a couple of thousand empty blocks. But this \
simulation indicates that fees collapsed to zero after {} almost-empty blocks. \
Check it",
iterations,
);
})
}
#[test]
#[ignore] // for it is a time consuming test
fn congested_chain_simulation() {
// `cargo test congested_chain_simulation -- --nocapture` to get some insight.
// almost full. The entire quota of normal transactions is taken.
let block_weight = AvailableBlockRatio::get() * max() - 100;
// Default substrate minimum.
let tx_weight = 10_000;
run_with_system_weight(block_weight, || {
// initial value configured on module
let mut fm = Fixed64::default();
assert_eq!(fm, TransactionPayment::next_fee_multiplier());
let mut iterations: u64 = 0;
loop {
let next = TargetedFeeAdjustment::<TargetBlockFullness>::convert(fm);
// if no change, panic. This should never happen in this case.
if fm == next { panic!("The fee should always increase"); }
fm = next;
iterations += 1;
let fee = <Runtime as pallet_transaction_payment::Trait>::WeightToFee::convert(tx_weight);
let adjusted_fee = fm.saturated_multiply_accumulate(fee);
println!(
"iteration {}, new fm = {:?}. Fee at this point is: {} units / {} millicents, \
{} cents, {} dollars",
iterations,
fm,
adjusted_fee,
adjusted_fee / MILLICENTS,
adjusted_fee / CENTS,
adjusted_fee / DOLLARS,
);
}
});
}
#[test]
fn stateless_weight_mul() {
run_with_system_weight(target() / 4, || {
// Light block. Fee is reduced a little.
assert_eq!(
TargetedFeeAdjustment::<TargetBlockFullness>::convert(Fixed64::default()),
feemul(-7500),
);
});
run_with_system_weight(target() / 2, || {
// a bit more. Fee is decreased less, meaning that the fee increases as the block grows.
assert_eq!(
TargetedFeeAdjustment::<TargetBlockFullness>::convert(Fixed64::default()),
feemul(-5000),
);
});
run_with_system_weight(target(), || {
// ideal. Original fee. No changes.
assert_eq!(
TargetedFeeAdjustment::<TargetBlockFullness>::convert(Fixed64::default()),
feemul(0),
);
});
run_with_system_weight(target() * 2, || {
// More than ideal. Fee is increased.
assert_eq!(
TargetedFeeAdjustment::<TargetBlockFullness>::convert(Fixed64::default()),
feemul(10000),
);
});
}
#[test]
fn stateful_weight_mul_grow_to_infinity() {
run_with_system_weight(target() * 2, || {
assert_eq!(
TargetedFeeAdjustment::<TargetBlockFullness>::convert(Fixed64::default()),
feemul(10000)
);
assert_eq!(
TargetedFeeAdjustment::<TargetBlockFullness>::convert(feemul(10000)),
feemul(20000)
);
assert_eq!(
TargetedFeeAdjustment::<TargetBlockFullness>::convert(feemul(20000)),
feemul(30000)
);
// ...
assert_eq!(
TargetedFeeAdjustment::<TargetBlockFullness>::convert(feemul(1_000_000_000)),
feemul(1_000_000_000 + 10000)
);
});
}
#[test]
fn stateful_weight_mul_collapse_to_minus_one() {
run_with_system_weight(0, || {
assert_eq!(
TargetedFeeAdjustment::<TargetBlockFullness>::convert(Fixed64::default()),
feemul(-10000)
);
assert_eq!(
TargetedFeeAdjustment::<TargetBlockFullness>::convert(feemul(-10000)),
feemul(-20000)
);
assert_eq!(
TargetedFeeAdjustment::<TargetBlockFullness>::convert(feemul(-20000)),
feemul(-30000)
);
// ...
assert_eq!(
TargetedFeeAdjustment::<TargetBlockFullness>::convert(feemul(1_000_000_000 * -1)),
feemul(-1_000_000_000)
);
})
}
#[test]
fn weight_to_fee_should_not_overflow_on_large_weights() {
let kb = 1024 as Weight;
let mb = kb * kb;
let max_fm = Fixed64::from_natural(i64::max_value());
// Check that it computes correctly for all values.
vec![
0,
1,
10,
1000,
kb,
10 * kb,
100 * kb,
mb,
10 * mb,
Weight::max_value() / 2,
Weight::max_value(),
].into_iter().for_each(|i| {
run_with_system_weight(i, || {
let next = TargetedFeeAdjustment::<TargetBlockFullness>::convert(Fixed64::default());
let truth = fee_multiplier_update(i, Fixed64::default());
assert_eq_error_rate!(truth.into_inner(), next.into_inner(), 5);
});
});
// Some values that are all above the target and will cause an increase.
let t = target();
vec![t + 100, t * 2, t * 4]
.into_iter()
.for_each(|i| {
run_with_system_weight(i, || {
let fm = TargetedFeeAdjustment::<TargetBlockFullness>::convert(max_fm);
// won't grow. The convert saturates everything.
assert_eq!(fm, max_fm);
})
});
}
} | 32.057971 | 103 | 0.681736 |
214f0bd0f0686eb289de79bc8b587d1a11244377 | 33,508 | #![doc(html_root_url = "https://docs.rs/prost-build/0.6.1")]
#![allow(clippy::option_as_ref_deref)]
//! `prost-build` compiles `.proto` files into Rust.
//!
//! `prost-build` is designed to be used for build-time code generation as part of a Cargo
//! build-script.
//!
//! ## Example
//!
//! Let's create a small crate, `snazzy`, that defines a collection of
//! snazzy new items in a protobuf file.
//!
//! ```bash
//! $ cargo new snazzy && cd snazzy
//! ```
//!
//! First, add `prost-build`, `prost` and its public dependencies to `Cargo.toml`
//! (see [crates.io](https://crates.io/crates/prost) for the current versions):
//!
//! ```toml
//! [dependencies]
//! bytes = <bytes-version>
//! prost = <prost-version>
//!
//! [build-dependencies]
//! prost-build = { version = <prost-version> }
//! ```
//!
//! Next, add `src/items.proto` to the project:
//!
//! ```proto
//! syntax = "proto3";
//!
//! package snazzy.items;
//!
//! // A snazzy new shirt!
//! message Shirt {
//! enum Size {
//! SMALL = 0;
//! MEDIUM = 1;
//! LARGE = 2;
//! }
//!
//! string color = 1;
//! Size size = 2;
//! }
//! ```
//!
//! To generate Rust code from `items.proto`, we use `prost-build` in the crate's
//! `build.rs` build-script:
//!
//! ```rust,no_run
//! # use std::io::Result;
//! fn main() -> Result<()> {
//! prost_build::compile_protos(&["src/items.proto"], &["src/"])?;
//! Ok(())
//! }
//! ```
//!
//! And finally, in `lib.rs`, include the generated code:
//!
//! ```rust,ignore
//! // Include the `items` module, which is generated from items.proto.
//! pub mod items {
//! include!(concat!(env!("OUT_DIR"), "/snazzy.items.rs"));
//! }
//!
//! pub fn create_large_shirt(color: String) -> items::Shirt {
//! let mut shirt = items::Shirt::default();
//! shirt.color = color;
//! shirt.set_size(items::shirt::Size::Large);
//! shirt
//! }
//! ```
//!
//! That's it! Run `cargo doc` to see documentation for the generated code. The full
//! example project can be found on [GitHub](https://github.com/danburkert/snazzy).
//!
//! ## Sourcing `protoc`
//!
//! `prost-build` depends on the Protocol Buffers compiler, `protoc`, to parse `.proto` files into
//! a representation that can be transformed into Rust. If set, `prost-build` uses the `PROTOC` and
//! `PROTOC_INCLUDE` environment variables for locating `protoc` and the Protobuf includes
//! directory. For example, on a macOS system where Protobuf is installed with Homebrew, set the
//! environment to:
//!
//! ```bash
//! PROTOC=/usr/local/bin/protoc
//! PROTOC_INCLUDE=/usr/local/include
//! ```
//!
//! and in a typical Linux installation:
//!
//! ```bash
//! PROTOC=/usr/bin/protoc
//! PROTOC_INCLUDE=/usr/include
//! ```
//!
//! If `PROTOC` is not found in the environment, then a pre-compiled `protoc` binary bundled in
//! the prost-build crate is used. Pre-compiled `protoc` binaries exist for Linux, macOS, and
//! Windows systems. If no pre-compiled `protoc` is available for the host platform, then the
//! `protoc` or `protoc.exe` binary on the `PATH` is used. If `protoc` is not available in any of
//! these fallback locations, then the build fails.
//!
//! If `PROTOC_INCLUDE` is not found in the environment, then the Protobuf include directory bundled
//! in the prost-build crate is be used.
mod ast;
mod code_generator;
mod extern_paths;
mod ident;
mod message_graph;
use std::collections::HashMap;
use std::default;
use std::env;
use std::ffi::{OsStr, OsString};
use std::fmt;
use std::fs;
use std::io::{Error, ErrorKind, Result};
use std::path::{Path, PathBuf};
use std::process::Command;
use log::trace;
use prost::Message;
use prost_types::{FileDescriptorProto, FileDescriptorSet};
pub use crate::ast::{Comments, Method, Service};
use crate::code_generator::CodeGenerator;
use crate::extern_paths::ExternPaths;
use crate::ident::to_snake;
use crate::message_graph::MessageGraph;
type Module = Vec<String>;
/// A service generator takes a service descriptor and generates Rust code.
///
/// `ServiceGenerator` can be used to generate application-specific interfaces
/// or implementations for Protobuf service definitions.
///
/// Service generators are registered with a code generator using the
/// `Config::service_generator` method.
///
/// A viable scenario is that an RPC framework provides a service generator. It generates a trait
/// describing methods of the service and some glue code to call the methods of the trait, defining
/// details like how errors are handled or if it is asynchronous. Then the user provides an
/// implementation of the generated trait in the application code and plugs it into the framework.
///
/// Such a framework isn't part of Prost at present.
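///
/// # Example
///
/// A minimal sketch of such a generator (the emitted text is purely illustrative):
///
/// ```rust,ignore
/// struct CommentGenerator;
///
/// impl prost_build::ServiceGenerator for CommentGenerator {
///     fn generate(&mut self, service: prost_build::Service, buf: &mut String) {
///         // Emit a placeholder line per service; a real framework would emit a
///         // trait and glue code here.
///         buf.push_str(&format!("// service: {}\n", service.name));
///     }
/// }
/// ```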
pub trait ServiceGenerator {
/// Generates a Rust interface or implementation for a service, writing the
/// result to `buf`.
fn generate(&mut self, service: Service, buf: &mut String);
/// Finalizes the generation process.
///
/// In case there's something that needs to be output at the end of the generation process, it
/// goes here. Similar to [`generate`](#method.generate), the output should be appended to
/// `buf`.
///
/// An example can be a module or other thing that needs to appear just once, not for each
/// service generated.
///
/// This still can be called multiple times in a lifetime of the service generator, because it
/// is called once per `.proto` file.
///
/// The default implementation is empty and does nothing.
fn finalize(&mut self, _buf: &mut String) {}
/// Finalizes the generation process for an entire protobuf package.
///
/// This differs from [`finalize`](#method.finalize) by where (and how often) it is called
/// during the service generator life cycle. This method is called once per protobuf package,
/// making it ideal for grouping services within a single package spread across multiple
/// `.proto` files.
///
/// The default implementation is empty and does nothing.
fn finalize_package(&mut self, _package: &str, _buf: &mut String) {}
}
/// Configuration options for Protobuf code generation.
///
/// This configuration builder can be used to set non-default code generation options.
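///
/// # Examples
///
/// A minimal sketch for a `build.rs` (the `.proto` path is hypothetical):
///
/// ```rust,no_run
/// # use std::io::Result;
/// # fn main() -> Result<()> {
/// let mut config = prost_build::Config::new();
/// config.btree_map(&["."]);
/// config.compile_protos(&["src/items.proto"], &["src/"])?;
/// # Ok(())
/// # }
/// ```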
pub struct Config {
service_generator: Option<Box<dyn ServiceGenerator>>,
btree_map: Vec<String>,
bytes: Vec<String>,
type_attributes: Vec<(String, String)>,
field_attributes: Vec<(String, String)>,
field_type_attributes: Vec<(String, String)>,
prost_types: bool,
strip_enum_prefix: bool,
out_dir: Option<PathBuf>,
extern_paths: Vec<(String, String)>,
protoc_args: Vec<OsString>,
disable_comments: bool,
}
impl Config {
/// Creates a new code generator configuration with default options.
pub fn new() -> Config {
Config::default()
}
/// Configure the code generator to generate Rust [`BTreeMap`][1] fields for Protobuf
/// [`map`][2] type fields.
///
/// # Arguments
///
/// **`paths`** - paths to specific fields, messages, or packages which should use a Rust
/// `BTreeMap` for Protobuf `map` fields. Paths are specified in terms of the Protobuf type
/// name (not the generated Rust type name). Paths with a leading `.` are treated as fully
/// qualified names. Paths without a leading `.` are treated as relative, and are suffix
/// matched on the fully qualified field name. If a Protobuf map field matches any of the
/// paths, a Rust `BTreeMap` field is generated instead of the default [`HashMap`][3].
///
/// The matching is done on the Protobuf names, before converting to Rust-friendly casing
/// standards.
///
/// # Examples
///
/// ```rust
/// # let mut config = prost_build::Config::new();
/// // Match a specific field in a message type.
/// config.btree_map(&[".my_messages.MyMessageType.my_map_field"]);
///
/// // Match all map fields in a message type.
/// config.btree_map(&[".my_messages.MyMessageType"]);
///
/// // Match all map fields in a package.
/// config.btree_map(&[".my_messages"]);
///
/// // Match all map fields. Especially useful in `no_std` contexts.
/// config.btree_map(&["."]);
///
/// // Match all map fields in a nested message.
/// config.btree_map(&[".my_messages.MyMessageType.MyNestedMessageType"]);
///
/// // Match all fields named 'my_map_field'.
/// config.btree_map(&["my_map_field"]);
///
/// // Match all fields named 'my_map_field' in messages named 'MyMessageType', regardless of
/// // package or nesting.
/// config.btree_map(&["MyMessageType.my_map_field"]);
///
/// // Match all fields named 'my_map_field', and all fields in the 'foo.bar' package.
/// config.btree_map(&["my_map_field", ".foo.bar"]);
/// ```
///
/// [1]: https://doc.rust-lang.org/std/collections/struct.BTreeMap.html
/// [2]: https://developers.google.com/protocol-buffers/docs/proto3#maps
/// [3]: https://doc.rust-lang.org/std/collections/struct.HashMap.html
pub fn btree_map<I, S>(&mut self, paths: I) -> &mut Self
where
I: IntoIterator<Item = S>,
S: AsRef<str>,
{
self.btree_map = paths.into_iter().map(|s| s.as_ref().to_string()).collect();
self
}
/// Configure the code generator to generate Rust [`bytes::Bytes`][1] fields for Protobuf
/// [`bytes`][2] type fields.
///
/// # Arguments
///
/// **`paths`** - paths to specific fields, messages, or packages which should use a Rust
/// `Bytes` for Protobuf `bytes` fields. Paths are specified in terms of the Protobuf type
/// name (not the generated Rust type name). Paths with a leading `.` are treated as fully
/// qualified names. Paths without a leading `.` are treated as relative, and are suffix
/// matched on the fully qualified field name. If a Protobuf `bytes` field matches any of the
/// paths, a Rust `Bytes` field is generated instead of the default [`Vec<u8>`][3].
///
/// The matching is done on the Protobuf names, before converting to Rust-friendly casing
/// standards.
///
/// # Examples
///
/// ```rust
/// # let mut config = prost_build::Config::new();
/// // Match a specific field in a message type.
/// config.bytes(&[".my_messages.MyMessageType.my_bytes_field"]);
///
/// // Match all bytes fields in a message type.
/// config.bytes(&[".my_messages.MyMessageType"]);
///
/// // Match all bytes fields in a package.
/// config.bytes(&[".my_messages"]);
///
/// // Match all bytes fields. Especially useful in `no_std` contexts.
/// config.bytes(&["."]);
///
/// // Match all bytes fields in a nested message.
/// config.bytes(&[".my_messages.MyMessageType.MyNestedMessageType"]);
///
/// // Match all fields named 'my_bytes_field'.
/// config.bytes(&["my_bytes_field"]);
///
/// // Match all fields named 'my_bytes_field' in messages named 'MyMessageType', regardless of
/// // package or nesting.
/// config.bytes(&["MyMessageType.my_bytes_field"]);
///
/// // Match all fields named 'my_bytes_field', and all fields in the 'foo.bar' package.
/// config.bytes(&["my_bytes_field", ".foo.bar"]);
/// ```
///
/// [1]: https://docs.rs/bytes/latest/bytes/struct.Bytes.html
/// [2]: https://developers.google.com/protocol-buffers/docs/proto3#scalar
/// [3]: https://doc.rust-lang.org/std/vec/struct.Vec.html
pub fn bytes<I, S>(&mut self, paths: I) -> &mut Self
where
I: IntoIterator<Item = S>,
S: AsRef<str>,
{
self.bytes = paths.into_iter().map(|s| s.as_ref().to_string()).collect();
self
}
/// Add additional attribute to matched fields.
///
/// # Arguments
///
/// **`path`** - a path matching any number of fields. These fields get the attribute.
/// For details about matching fields see [`btree_map`](#method.btree_map).
///
/// **`attribute`** - an arbitrary string that'll be placed before each matched field. The
/// expected usage are additional attributes, usually in concert with whole-type
/// attributes set with [`type_attribute`](method.type_attribute), but it is not
/// checked and anything can be put there.
///
/// Note that the calls to this method are cumulative ‒ if multiple paths from multiple calls
/// match the same field, the field gets all the corresponding attributes.
///
/// # Examples
///
/// ```rust
/// # let mut config = prost_build::Config::new();
/// // Prost renames fields named `in` to `in_`. But if serialized through serde,
/// // they should still serialize as `in`.
/// config.field_attribute("in", "#[serde(rename = \"in\")]");
/// ```
pub fn field_attribute<P, A>(&mut self, path: P, attribute: A) -> &mut Self
where
P: AsRef<str>,
A: AsRef<str>,
{
self.field_attributes
.push((path.as_ref().to_string(), attribute.as_ref().to_string()));
self
}
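/// Add additional attribute to the types of matched fields.
///
/// Matching works the same way as in [`field_attribute`](#method.field_attribute); the
/// attribute strings are recorded per path and consumed by the code generator.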
pub fn field_type_attribute<P, A>(&mut self, path: P, attribute: A) -> &mut Self
where
P: AsRef<str>,
A: AsRef<str>,
{
self.field_type_attributes
.push((path.as_ref().to_string(), attribute.as_ref().to_string()));
self
}
/// Add additional attribute to matched messages, enums and one-ofs.
///
/// # Arguments
///
/// **`paths`** - a path matching any number of types. It works the same way as in
/// [`btree_map`](#method.btree_map), just with the field name omitted.
///
/// **`attribute`** - an arbitrary string to be placed before each matched type. The
/// expected usage are additional attributes, but anything is allowed.
///
/// The calls to this method are cumulative. They don't overwrite previous calls and if a
/// type is matched by multiple calls of the method, all relevant attributes are added to
/// it.
///
/// For things like serde it might be needed to combine with [field
/// attributes](#method.field_attribute).
///
/// # Examples
///
/// ```rust
/// # let mut config = prost_build::Config::new();
/// // Nothing around uses floats, so we can derive real `Eq` in addition to `PartialEq`.
/// config.type_attribute(".", "#[derive(Eq)]");
/// // Some messages want to be serializable with serde as well.
/// config.type_attribute("my_messages.MyMessageType",
/// "#[derive(Serialize)] #[serde(rename-all = \"snake_case\")]");
/// config.type_attribute("my_messages.MyMessageType.MyNestedMessageType",
/// "#[derive(Serialize)] #[serde(rename-all = \"snake_case\")]");
/// ```
///
/// # Oneof fields
///
/// The `oneof` fields don't have a type name of their own inside Protobuf. Therefore, the
/// field name can be used both with `type_attribute` and `field_attribute` ‒ the first is
/// placed before the `enum` type definition, the other before the field inside corresponding
/// message `struct`.
///
/// In other words, to place an attribute on the `enum` implementing the `oneof`, the match
/// would look like `my_messages.MyMessageType.oneofname`.
pub fn type_attribute<P, A>(&mut self, path: P, attribute: A) -> &mut Self
where
P: AsRef<str>,
A: AsRef<str>,
{
self.type_attributes
.push((path.as_ref().to_string(), attribute.as_ref().to_string()));
self
}
/// Configures the code generator to use the provided service generator.
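///
/// ```rust,ignore
/// // `MyServiceGenerator` is a hypothetical `ServiceGenerator` implementation.
/// let mut config = prost_build::Config::new();
/// config.service_generator(Box::new(MyServiceGenerator));
/// ```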
pub fn service_generator(&mut self, service_generator: Box<dyn ServiceGenerator>) -> &mut Self {
self.service_generator = Some(service_generator);
self
}
/// Configures the code generator to not use the `prost_types` crate for Protobuf well-known
/// types, and instead generate Protobuf well-known types from their `.proto` definitions.
pub fn compile_well_known_types(&mut self) -> &mut Self {
self.prost_types = false;
self
}
/// Declare an externally provided Protobuf package or type.
///
/// `extern_path` allows `prost` types in external crates to be referenced in generated code.
///
/// When `prost` compiles a `.proto` which includes an import of another `.proto`, it will
/// automatically recursively compile the imported file as well. `extern_path` can be used
/// to instead substitute types from an external crate.
///
/// # Example
///
/// As an example, consider a crate, `uuid`, with a `prost`-generated `Uuid` type:
///
/// ```proto
/// // uuid.proto
///
/// syntax = "proto3";
/// package uuid;
///
/// message Uuid {
/// string uuid_str = 1;
/// }
/// ```
///
/// The `uuid` crate implements some traits for `Uuid`, and publicly exports it:
///
/// ```rust,ignore
/// // lib.rs in the uuid crate
///
/// include!(concat!(env!("OUT_DIR"), "/uuid.rs"));
///
/// pub trait DoSomething {
/// fn do_it(&self);
/// }
///
/// impl DoSomething for Uuid {
/// fn do_it(&self) {
/// println!("Done");
/// }
/// }
/// ```
///
/// A separate crate, `my_application`, uses `prost` to generate message types which reference
/// `Uuid`:
///
/// ```proto
/// // my_application.proto
///
/// syntax = "proto3";
/// package my_application;
///
/// import "uuid.proto";
///
/// message MyMessage {
/// uuid.Uuid message_id = 1;
/// string some_payload = 2;
/// }
/// ```
///
/// Additionally, `my_application` depends on the trait impls provided by the `uuid` crate:
///
/// ```rust,ignore
/// // `main.rs` of `my_application`
///
/// use uuid::{DoSomething, Uuid};
///
/// include!(concat!(env!("OUT_DIR"), "/my_application.rs"));
///
/// pub fn process_message(msg: MyMessage) {
/// if let Some(uuid) = msg.message_id {
/// uuid.do_it();
/// }
/// }
/// ```
///
/// Without configuring `uuid` as an external path in `my_application`'s `build.rs`, `prost`
/// would compile a completely separate version of the `Uuid` type, and `process_message` would
/// fail to compile. However, if `my_application` configures `uuid` as an extern path with a
/// call to `.extern_path(".uuid", "::uuid")`, `prost` will use the external type instead of
/// compiling a new version of `Uuid`. Note that the configuration could also be specified as
/// `.extern_path(".uuid.Uuid", "::uuid::Uuid")` if only the `Uuid` type were externally
/// provided, and not the whole `uuid` package.
///
/// # Usage
///
/// `extern_path` takes a fully-qualified Protobuf path, and the corresponding Rust path that
/// it will be substituted with in generated code. The Protobuf path can refer to a package or
/// a type, and the Rust path should correspondingly refer to a Rust module or type.
///
/// ```rust
/// # let mut config = prost_build::Config::new();
/// // Declare the `uuid` Protobuf package and all nested packages and types as externally
/// // provided by the `uuid` crate.
/// config.extern_path(".uuid", "::uuid");
///
/// // Declare the `foo.bar.baz` Protobuf package and all nested packages and types as
/// // externally provided by the `foo_bar_baz` crate.
/// config.extern_path(".foo.bar.baz", "::foo_bar_baz");
///
/// // Declare the `uuid.Uuid` Protobuf type (and all nested types) as externally provided
/// // by the `uuid` crate's `Uuid` type.
/// config.extern_path(".uuid.Uuid", "::uuid::Uuid");
/// ```
pub fn extern_path<P1, P2>(&mut self, proto_path: P1, rust_path: P2) -> &mut Self
where
P1: Into<String>,
P2: Into<String>,
{
self.extern_paths
.push((proto_path.into(), rust_path.into()));
self
}
/// Configures the code generator to not strip the enum name from variant names.
///
/// Protobuf enum definitions commonly include the enum name as a prefix of every variant name.
/// This style is non-idiomatic in Rust, so by default `prost` strips the enum name prefix from
/// variants which include it. Configuring this option prevents `prost` from stripping the
/// prefix.
pub fn retain_enum_prefix(&mut self) -> &mut Self {
self.strip_enum_prefix = false;
self
}
/// Configures the output directory where generated Rust files will be written.
///
/// If unset, defaults to the `OUT_DIR` environment variable. `OUT_DIR` is set by Cargo when
/// executing build scripts, so `out_dir` typically does not need to be configured.
pub fn out_dir<P>(&mut self, path: P) -> &mut Self
where
P: Into<PathBuf>,
{
self.out_dir = Some(path.into());
self
}
/// Add an argument to the `protoc` protobuf compilation invocation.
///
/// # Example `build.rs`
///
/// ```rust,no_run
/// # use std::io::Result;
/// fn main() -> Result<()> {
/// let mut prost_build = prost_build::Config::new();
/// // Enable a protoc experimental feature.
/// prost_build.protoc_arg("--experimental_allow_proto3_optional");
/// prost_build.compile_protos(&["src/frontend.proto", "src/backend.proto"], &["src"])?;
/// Ok(())
/// }
/// ```
pub fn protoc_arg<S>(&mut self, arg: S) -> &mut Self
where
S: AsRef<OsStr>,
{
self.protoc_args.push(arg.as_ref().to_owned());
self
}
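/// Configures the code generator to omit the comments taken from the `.proto` files.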
pub fn disable_comments(&mut self) -> &mut Self {
self.disable_comments = true;
self
}
/// Compile `.proto` files into Rust files during a Cargo build with additional code generator
/// configuration options.
///
/// This method is like the `prost_build::compile_protos` function, with the added ability to
/// specify non-default code generation options. See that function for more information about
/// the arguments and generated outputs.
///
/// # Example `build.rs`
///
/// ```rust,no_run
/// # use std::io::Result;
/// fn main() -> Result<()> {
/// let mut prost_build = prost_build::Config::new();
/// prost_build.btree_map(&["."]);
/// prost_build.compile_protos(&["src/frontend.proto", "src/backend.proto"], &["src"])?;
/// Ok(())
/// }
/// ```
pub fn compile_protos<P>(&mut self, protos: &[P], includes: &[P]) -> Result<()>
where
P: AsRef<Path>,
{
let target: PathBuf = self.out_dir.clone().map(Ok).unwrap_or_else(|| {
env::var_os("OUT_DIR")
.ok_or_else(|| {
Error::new(ErrorKind::Other, "OUT_DIR environment variable is not set")
})
.map(Into::into)
})?;
// TODO: This should probably emit 'rerun-if-changed=PATH' directives for cargo, however
// according to [1] if any are output then those paths replace the default crate root,
// which is undesirable. Figure out how to do it in an additive way; perhaps gcc-rs has
// this figured out.
// [1]: http://doc.crates.io/build-script.html#outputs-of-the-build-script
let tmp = tempfile::Builder::new().prefix("prost-build").tempdir()?;
let descriptor_set = tmp.path().join("prost-descriptor-set");
let mut cmd = Command::new(protoc());
cmd.arg("--include_imports")
.arg("--include_source_info")
.arg("-o")
.arg(&descriptor_set);
for include in includes {
cmd.arg("-I").arg(include.as_ref());
}
// Set the protoc include after the user includes in case the user wants to
// override one of the built-in .protos.
cmd.arg("-I").arg(protoc_include());
for arg in &self.protoc_args {
cmd.arg(arg);
}
for proto in protos {
cmd.arg(proto.as_ref());
}
let output = cmd.output()?;
if !output.status.success() {
return Err(Error::new(
ErrorKind::Other,
format!("protoc failed: {}", String::from_utf8_lossy(&output.stderr)),
));
}
let buf = fs::read(descriptor_set)?;
let descriptor_set = FileDescriptorSet::decode(&*buf).map_err(|error| {
Error::new(
ErrorKind::InvalidInput,
format!("invalid FileDescriptorSet: {}", error.to_string()),
)
})?;
let modules = self.generate(descriptor_set.file)?;
for (module, content) in modules {
let mut filename = module.join(".");
filename.push_str(".rs");
let output_path = target.join(&filename);
let previous_content = fs::read(&output_path);
if previous_content
.map(|previous_content| previous_content == content.as_bytes())
.unwrap_or(false)
{
trace!("unchanged: {:?}", filename);
} else {
trace!("writing: {:?}", filename);
fs::write(output_path, content)?;
}
}
Ok(())
}
fn generate(&mut self, files: Vec<FileDescriptorProto>) -> Result<HashMap<Module, String>> {
let mut modules = HashMap::new();
let mut packages = HashMap::new();
let message_graph = MessageGraph::new(&files)
.map_err(|error| Error::new(ErrorKind::InvalidInput, error))?;
let extern_paths = ExternPaths::new(&self.extern_paths, self.prost_types)
.map_err(|error| Error::new(ErrorKind::InvalidInput, error))?;
for file in files {
let module = self.module(&file);
// Only record packages that have services
if !file.service.is_empty() {
packages.insert(module.clone(), file.package().to_string());
}
let mut buf = modules.entry(module).or_insert_with(String::new);
CodeGenerator::generate(self, &message_graph, &extern_paths, file, &mut buf);
}
if let Some(ref mut service_generator) = self.service_generator {
for (module, package) in packages {
let buf = modules.get_mut(&module).unwrap();
service_generator.finalize_package(&package, buf);
}
}
Ok(modules)
}
fn module(&self, file: &FileDescriptorProto) -> Module {
file.package()
.split('.')
.filter(|s| !s.is_empty())
.map(to_snake)
.collect()
}
}
impl default::Default for Config {
fn default() -> Config {
Config {
service_generator: None,
btree_map: Vec::new(),
bytes: Vec::new(),
type_attributes: Vec::new(),
field_attributes: Vec::new(),
field_type_attributes: Vec::new(),
prost_types: true,
strip_enum_prefix: true,
out_dir: None,
extern_paths: Vec::new(),
protoc_args: Vec::new(),
disable_comments: false,
}
}
}
impl fmt::Debug for Config {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("Config")
.field("btree_map", &self.btree_map)
.field("type_attributes", &self.type_attributes)
.field("field_attributes", &self.field_attributes)
.field("field_type_attributes", &self.field_type_attributes)
.field("prost_types", &self.prost_types)
.field("strip_enum_prefix", &self.strip_enum_prefix)
.field("out_dir", &self.out_dir)
.field("extern_paths", &self.extern_paths)
.finish()
}
}
/// Compile `.proto` files into Rust files during a Cargo build.
///
/// The generated `.rs` files are written to the Cargo `OUT_DIR` directory, suitable for use with
/// the [include!][1] macro. See the [Cargo `build.rs` code generation][2] example for more info.
///
/// This function should be called in a project's `build.rs`.
///
/// # Arguments
///
/// **`protos`** - Paths to `.proto` files to compile. Any transitively [imported][3] `.proto`
/// files are automatically included.
///
/// **`includes`** - Paths to directories in which to search for imports. Directories are searched
/// in order. The `.proto` files passed in **`protos`** must be found in one of the provided
/// include directories.
///
/// # Errors
///
/// This function can fail for a number of reasons:
///
/// - Failure to locate or download `protoc`.
/// - Failure to parse the `.proto`s.
/// - Failure to locate an imported `.proto`.
/// - Failure to compile a `.proto` without a [package specifier][4].
///
/// It's expected that this function call be `unwrap`ed in a `build.rs`; there is typically no
/// reason to gracefully recover from errors during a build.
///
/// # Example `build.rs`
///
/// ```rust,no_run
/// # use std::io::Result;
/// fn main() -> Result<()> {
/// prost_build::compile_protos(&["src/frontend.proto", "src/backend.proto"], &["src"])?;
/// Ok(())
/// }
/// ```
///
/// [1]: https://doc.rust-lang.org/std/macro.include.html
/// [2]: http://doc.crates.io/build-script.html#case-study-code-generation
/// [3]: https://developers.google.com/protocol-buffers/docs/proto3#importing-definitions
/// [4]: https://developers.google.com/protocol-buffers/docs/proto#packages
pub fn compile_protos<P>(protos: &[P], includes: &[P]) -> Result<()>
where
P: AsRef<Path>,
{
Config::new().compile_protos(protos, includes)
}
/// Returns the path to the `protoc` binary.
pub fn protoc() -> PathBuf {
match env::var_os("PROTOC") {
Some(protoc) => PathBuf::from(protoc),
None => PathBuf::from(env!("PROTOC")),
}
}
/// Returns the path to the Protobuf include directory.
pub fn protoc_include() -> PathBuf {
match env::var_os("PROTOC_INCLUDE") {
Some(include) => PathBuf::from(include),
None => PathBuf::from(env!("PROTOC_INCLUDE")),
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::cell::RefCell;
use std::rc::Rc;
/// An example service generator that generates a trait with methods corresponding to the
/// service methods.
struct ServiceTraitGenerator;
impl ServiceGenerator for ServiceTraitGenerator {
fn generate(&mut self, service: Service, buf: &mut String) {
// Generate a trait for the service.
service.comments.append_with_indent(0, buf);
buf.push_str(&format!("trait {} {{\n", &service.name));
// Generate the service methods.
for method in service.methods {
method.comments.append_with_indent(1, buf);
buf.push_str(&format!(
" fn {}({}) -> {};\n",
method.name, method.input_type, method.output_type
));
}
// Close out the trait.
buf.push_str("}\n");
}
fn finalize(&mut self, buf: &mut String) {
// Needs to be present only once, no matter how many services there are
buf.push_str("pub mod utils { }\n");
}
}
/// Implements `ServiceGenerator` and provides some state for assertions.
struct MockServiceGenerator {
state: Rc<RefCell<MockState>>,
}
/// Holds state for `MockServiceGenerator`
#[derive(Default)]
struct MockState {
service_names: Vec<String>,
package_names: Vec<String>,
finalized: u32,
}
impl MockServiceGenerator {
fn new(state: Rc<RefCell<MockState>>) -> Self {
Self { state }
}
}
impl ServiceGenerator for MockServiceGenerator {
fn generate(&mut self, service: Service, _buf: &mut String) {
let mut state = self.state.borrow_mut();
state.service_names.push(service.name);
}
fn finalize(&mut self, _buf: &mut String) {
let mut state = self.state.borrow_mut();
state.finalized += 1;
}
fn finalize_package(&mut self, package: &str, _buf: &mut String) {
let mut state = self.state.borrow_mut();
state.package_names.push(package.to_string());
}
}
#[test]
fn smoke_test() {
let _ = env_logger::try_init();
Config::new()
.service_generator(Box::new(ServiceTraitGenerator))
.compile_protos(&["src/smoke_test.proto"], &["src"])
.unwrap();
}
#[test]
fn finalize_package() {
let _ = env_logger::try_init();
let state = Rc::new(RefCell::new(MockState::default()));
let gen = MockServiceGenerator::new(Rc::clone(&state));
Config::new()
.service_generator(Box::new(gen))
.compile_protos(&["src/hello.proto", "src/goodbye.proto"], &["src"])
.unwrap();
let state = state.borrow();
assert_eq!(&state.service_names, &["Greeting", "Farewell"]);
assert_eq!(&state.package_names, &["helloworld"]);
assert_eq!(state.finalized, 3);
}
}
| 36.342733 | 100 | 0.603975 |
71176c19538e7fb3000c3737c87fe2700d31e43c | 1,172 | use js_sys::Array;
use wasm_bindgen::{prelude::*, JsCast};
use web_sys::{DedicatedWorkerGlobalScope, MessageEvent};
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
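// Message protocol (as implemented below): the worker first posts an empty array to
// signal readiness; thereafter, for each incoming `[a, b]` array it appends `a * b`
// and posts the result back to the spawning thread.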
fn main() {
console_error_panic_hook::set_once();
web_sys::console::log_1(&"worker starting".into());
let scope = DedicatedWorkerGlobalScope::from(JsValue::from(js_sys::global()));
let scope_clone = scope.clone();
let onmessage = Closure::wrap(Box::new(move |msg: MessageEvent| {
web_sys::console::log_1(&"got message".into());
let data = Array::from(&msg.data());
let a = data.get(0).as_f64().expect("first array value to be a number") as u32;
let b = data.get(1).as_f64().expect("second array value to be a number") as u32;
data.push(&(a * b).into());
scope_clone
.post_message(&data.into())
.expect("posting result message succeeds");
}) as Box<dyn Fn(MessageEvent)>);
scope.set_onmessage(Some(onmessage.as_ref().unchecked_ref()));
onmessage.forget();
scope
.post_message(&Array::new().into())
.expect("posting ready message succeeds");
}
| 34.470588 | 88 | 0.646758 |
8a7dd78c834ce875de57906aeab93e5184dca76e | 16,668 | // This file is part of Substrate.
// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Transactions handling to plug on top of the network service.
//!
//! Usage:
//!
//! - Use [`TransactionsHandlerPrototype::new`] to create a prototype.
//! - Pass the return value of [`TransactionsHandlerPrototype::set_config`] to the network
//! configuration as an extra peers set.
//! - Use [`TransactionsHandlerPrototype::build`] then [`TransactionsHandler::run`] to obtain a
//! `Future` that processes transactions.
//!
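//! A minimal sketch (surrounding setup elided; `net_config`, `service`, `pool` and the
//! spawner are hypothetical):
//!
//! ```ignore
//! let prototype = TransactionsHandlerPrototype::new(protocol_id);
//! net_config.extra_sets.push(prototype.set_config());
//! let (handler, mut controller) = prototype.build(service, role, pool, None)?;
//! controller.set_gossip_enabled(true);
//! task_spawner.spawn(handler.run());
//! ```
//!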
use crate::{
ExHashT, Event, ObservedRole,
config::{self, ProtocolId, TransactionPool, TransactionImportFuture, TransactionImport},
error, protocol::message, service::NetworkService, utils::{interval, LruHashSet},
};
use codec::{Decode, Encode};
use futures::{channel::mpsc, prelude::*, stream::FuturesUnordered};
use libp2p::{multiaddr, PeerId};
use log::{trace, debug, warn};
use prometheus_endpoint::{
Registry, Counter, PrometheusError, register, U64
};
use sp_runtime::traits::Block as BlockT;
use std::borrow::Cow;
use std::collections::{HashMap, hash_map::Entry};
use std::sync::{atomic::{AtomicBool, Ordering}, Arc};
use std::{iter, num::NonZeroUsize, pin::Pin, task::Poll, time};
/// Interval at which we propagate transactions.
const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900);
/// Maximum number of known transaction hashes to keep for a peer.
///
/// This should be approx. 2 blocks full of transactions for the network to function properly.
const MAX_KNOWN_TRANSACTIONS: usize = 10240; // ~300kb per peer + overhead.
/// Maximum allowed size for a transactions notification.
const MAX_TRANSACTIONS_SIZE: u64 = 16 * 1024 * 1024;
/// Maximum number of transaction validation requests we keep at any moment.
const MAX_PENDING_TRANSACTIONS: usize = 8192;
mod rep {
use sc_peerset::ReputationChange as Rep;
/// Reputation change when a peer sends us any transaction.
///
/// This forces the node to verify it, thus the negative value here. Once the transaction
/// is verified, the reputation change should be refunded with `ANY_TRANSACTION_REFUND`.
pub const ANY_TRANSACTION: Rep = Rep::new(-(1 << 4), "Any transaction");
/// Reputation change when a peer sends us any transaction that is not invalid.
pub const ANY_TRANSACTION_REFUND: Rep = Rep::new(1 << 4, "Any transaction (refund)");
/// Reputation change when a peer sends us a transaction that we didn't know about.
pub const GOOD_TRANSACTION: Rep = Rep::new(1 << 7, "Good transaction");
/// Reputation change when a peer sends us a bad transaction.
pub const BAD_TRANSACTION: Rep = Rep::new(-(1 << 12), "Bad transaction");
/// We received an unexpected transaction packet.
pub const UNEXPECTED_TRANSACTIONS: Rep = Rep::new_fatal("Unexpected transactions packet");
}
struct Metrics {
propagated_transactions: Counter<U64>,
}
impl Metrics {
fn register(r: &Registry) -> Result<Self, PrometheusError> {
Ok(Metrics {
propagated_transactions: register(Counter::new(
"sync_propagated_transactions",
"Number of transactions propagated to at least one peer",
)?, r)?,
})
}
}
#[pin_project::pin_project]
struct PendingTransaction<H> {
#[pin]
validation: TransactionImportFuture,
tx_hash: H,
}
impl<H: ExHashT> Future for PendingTransaction<H> {
type Output = (H, TransactionImport);
fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
let mut this = self.project();
if let Poll::Ready(import_result) = Pin::new(&mut this.validation).poll_unpin(cx) {
return Poll::Ready((this.tx_hash.clone(), import_result));
}
Poll::Pending
}
}
/// Prototype for a [`TransactionsHandler`].
pub struct TransactionsHandlerPrototype {
protocol_name: Cow<'static, str>,
}
impl TransactionsHandlerPrototype {
/// Create a new instance.
pub fn new(protocol_id: ProtocolId) -> Self {
TransactionsHandlerPrototype {
protocol_name: Cow::from({
let mut proto = String::new();
proto.push_str("/");
proto.push_str(protocol_id.as_ref());
proto.push_str("/transactions/1");
proto
})
}
}
/// Returns the configuration of the set to put in the network configuration.
pub fn set_config(&self) -> config::NonDefaultSetConfig {
config::NonDefaultSetConfig {
notifications_protocol: self.protocol_name.clone(),
fallback_names: Vec::new(),
max_notification_size: MAX_TRANSACTIONS_SIZE,
set_config: config::SetConfig {
in_peers: 0,
out_peers: 0,
reserved_nodes: Vec::new(),
non_reserved_mode: config::NonReservedPeerMode::Deny,
}
}
}
/// Turns the prototype into the actual handler. Returns a controller that allows controlling
/// the behaviour of the handler while it's running.
///
/// Important: the transactions handler is initially disabled and doesn't gossip transactions.
/// You must call [`TransactionsHandlerController::set_gossip_enabled`] to enable it.
pub fn build<B: BlockT + 'static, H: ExHashT>(
self,
service: Arc<NetworkService<B, H>>,
local_role: config::Role,
transaction_pool: Arc<dyn TransactionPool<H, B>>,
metrics_registry: Option<&Registry>,
) -> error::Result<(TransactionsHandler<B, H>, TransactionsHandlerController<H>)> {
let event_stream = service.event_stream("transactions-handler").boxed();
let (to_handler, from_controller) = mpsc::unbounded();
let gossip_enabled = Arc::new(AtomicBool::new(false));
let handler = TransactionsHandler {
protocol_name: self.protocol_name,
propagate_timeout: Box::pin(interval(PROPAGATE_TIMEOUT)),
pending_transactions: FuturesUnordered::new(),
pending_transactions_peers: HashMap::new(),
gossip_enabled: gossip_enabled.clone(),
service,
event_stream,
peers: HashMap::new(),
transaction_pool,
local_role,
from_controller,
metrics: if let Some(r) = metrics_registry {
Some(Metrics::register(r)?)
} else {
None
},
};
let controller = TransactionsHandlerController {
to_handler,
gossip_enabled,
};
Ok((handler, controller))
}
}
/// Controls the behaviour of the [`TransactionsHandler`] it is connected to.
pub struct TransactionsHandlerController<H: ExHashT> {
to_handler: mpsc::UnboundedSender<ToHandler<H>>,
gossip_enabled: Arc<AtomicBool>,
}
impl<H: ExHashT> TransactionsHandlerController<H> {
/// Controls whether transactions are being gossiped on the network.
pub fn set_gossip_enabled(&mut self, enabled: bool) {
self.gossip_enabled.store(enabled, Ordering::Relaxed);
}
/// You may call this when new transactions are imported by the transaction pool.
///
/// All transactions will be fetched from the `TransactionPool` that was passed at
/// initialization as part of the configuration and propagated to peers.
pub fn propagate_transactions(&self) {
let _ = self.to_handler.unbounded_send(ToHandler::PropagateTransactions);
}
/// You must call this when a new transaction is imported by the transaction pool.
///
/// This transaction will be fetched from the `TransactionPool` that was passed at
/// initialization as part of the configuration and propagated to peers.
pub fn propagate_transaction(&self, hash: H) {
let _ = self.to_handler.unbounded_send(ToHandler::PropagateTransaction(hash));
}
}
enum ToHandler<H: ExHashT> {
PropagateTransactions,
PropagateTransaction(H),
}
/// Handler for transactions. Call [`TransactionsHandler::run`] to start the processing.
pub struct TransactionsHandler<B: BlockT + 'static, H: ExHashT> {
protocol_name: Cow<'static, str>,
/// Interval at which we call `propagate_transactions`.
propagate_timeout: Pin<Box<dyn Stream<Item = ()> + Send>>,
/// Pending transactions verification tasks.
pending_transactions: FuturesUnordered<PendingTransaction<H>>,
/// As multiple peers can send us the same transaction, we group
/// these peers using the transaction hash while the transaction is
/// imported. This prevents that we import the same transaction
/// multiple times concurrently.
pending_transactions_peers: HashMap<H, Vec<PeerId>>,
/// Network service to use to send messages and manage peers.
service: Arc<NetworkService<B, H>>,
/// Stream of networking events.
event_stream: Pin<Box<dyn Stream<Item = Event> + Send>>,
// All connected peers
peers: HashMap<PeerId, Peer<H>>,
transaction_pool: Arc<dyn TransactionPool<H, B>>,
gossip_enabled: Arc<AtomicBool>,
local_role: config::Role,
from_controller: mpsc::UnboundedReceiver<ToHandler<H>>,
/// Prometheus metrics.
metrics: Option<Metrics>,
}
/// Peer information
#[derive(Debug)]
struct Peer<H: ExHashT> {
/// Holds a set of transactions known to this peer.
known_transactions: LruHashSet<H>,
role: ObservedRole,
}
impl<B: BlockT + 'static, H: ExHashT> TransactionsHandler<B, H> {
/// Turns the [`TransactionsHandler`] into a future that should run forever and not be
/// interrupted.
pub async fn run(mut self) {
loop {
futures::select!{
_ = self.propagate_timeout.next().fuse() => {
self.propagate_transactions();
},
(tx_hash, result) = self.pending_transactions.select_next_some() => {
if let Some(peers) = self.pending_transactions_peers.remove(&tx_hash) {
peers.into_iter().for_each(|p| self.on_handle_transaction_import(p, result));
} else {
warn!(target: "sub-libp2p", "Inconsistent state, no peers for pending transaction!");
}
},
network_event = self.event_stream.next().fuse() => {
if let Some(network_event) = network_event {
self.handle_network_event(network_event).await;
} else {
// Networking has seemingly closed. Closing as well.
return;
}
},
message = self.from_controller.select_next_some().fuse() => {
match message {
ToHandler::PropagateTransaction(hash) => self.propagate_transaction(&hash),
ToHandler::PropagateTransactions => self.propagate_transactions(),
}
},
}
}
}
async fn handle_network_event(&mut self, event: Event) {
match event {
Event::Dht(_) => {},
Event::SyncConnected { remote } => {
let addr = iter::once(multiaddr::Protocol::P2p(remote.into()))
.collect::<multiaddr::Multiaddr>();
let result = self.service.add_peers_to_reserved_set(
self.protocol_name.clone(),
iter::once(addr).collect()
);
if let Err(err) = result {
log::error!(target: "sync", "Add reserved peer failed: {}", err);
}
},
Event::SyncDisconnected { remote } => {
let addr = iter::once(multiaddr::Protocol::P2p(remote.into()))
.collect::<multiaddr::Multiaddr>();
let result = self.service.remove_peers_from_reserved_set(
self.protocol_name.clone(),
iter::once(addr).collect()
);
if let Err(err) = result {
log::error!(target: "sync", "Removing reserved peer failed: {}", err);
}
},
Event::NotificationStreamOpened { remote, protocol, role, .. } if protocol == self.protocol_name => {
let _was_in = self.peers.insert(remote, Peer {
known_transactions: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_TRANSACTIONS)
.expect("Constant is nonzero")),
role,
});
debug_assert!(_was_in.is_none());
}
Event::NotificationStreamClosed { remote, protocol } if protocol == self.protocol_name => {
let _peer = self.peers.remove(&remote);
debug_assert!(_peer.is_some());
}
Event::NotificationsReceived { remote, messages } => {
for (protocol, message) in messages {
if protocol != self.protocol_name {
continue;
}
if let Ok(m) = <message::Transactions<B::Extrinsic> as Decode>::decode(
&mut message.as_ref(),
) {
self.on_transactions(remote, m);
} else {
warn!(target: "sub-libp2p", "Failed to decode transactions list");
}
}
},
// Not our concern.
Event::NotificationStreamOpened { .. } | Event::NotificationStreamClosed { .. } => {}
}
}
/// Called when peer sends us new transactions
fn on_transactions(
&mut self,
who: PeerId,
transactions: message::Transactions<B::Extrinsic>,
) {
// Sending transactions to a light node is considered bad behavior.
if matches!(self.local_role, config::Role::Light) {
debug!(target: "sync", "Peer {} is trying to send transactions to the light node", who);
self.service.disconnect_peer(who, self.protocol_name.clone());
self.service.report_peer(who, rep::UNEXPECTED_TRANSACTIONS);
return;
}
// Accept transactions only when enabled
if !self.gossip_enabled.load(Ordering::Relaxed) {
trace!(target: "sync", "{} Ignoring transactions while disabled", who);
return;
}
trace!(target: "sync", "Received {} transactions from {}", transactions.len(), who);
if let Some(ref mut peer) = self.peers.get_mut(&who) {
for t in transactions {
if self.pending_transactions.len() > MAX_PENDING_TRANSACTIONS {
debug!(
target: "sync",
"Ignoring any further transactions that exceed `MAX_PENDING_TRANSACTIONS`({}) limit",
MAX_PENDING_TRANSACTIONS,
);
break;
}
let hash = self.transaction_pool.hash_of(&t);
peer.known_transactions.insert(hash.clone());
self.service.report_peer(who.clone(), rep::ANY_TRANSACTION);
match self.pending_transactions_peers.entry(hash.clone()) {
Entry::Vacant(entry) => {
self.pending_transactions.push(PendingTransaction {
validation: self.transaction_pool.import(t),
tx_hash: hash,
});
entry.insert(vec![who.clone()]);
},
Entry::Occupied(mut entry) => {
entry.get_mut().push(who.clone());
}
}
}
}
}
fn on_handle_transaction_import(&mut self, who: PeerId, import: TransactionImport) {
match import {
TransactionImport::KnownGood => self.service.report_peer(who, rep::ANY_TRANSACTION_REFUND),
TransactionImport::NewGood => self.service.report_peer(who, rep::GOOD_TRANSACTION),
TransactionImport::Bad => self.service.report_peer(who, rep::BAD_TRANSACTION),
TransactionImport::None => {},
}
}
/// Propagate one transaction.
pub fn propagate_transaction(
&mut self,
hash: &H,
) {
debug!(target: "sync", "Propagating transaction [{:?}]", hash);
// Accept transactions only when enabled
if !self.gossip_enabled.load(Ordering::Relaxed) {
return;
}
if let Some(transaction) = self.transaction_pool.transaction(hash) {
let propagated_to = self.do_propagate_transactions(&[(hash.clone(), transaction)]);
self.transaction_pool.on_broadcasted(propagated_to);
}
}
fn do_propagate_transactions(
&mut self,
transactions: &[(H, B::Extrinsic)],
) -> HashMap<H, Vec<String>> {
let mut propagated_to = HashMap::<_, Vec<_>>::new();
let mut propagated_transactions = 0;
for (who, peer) in self.peers.iter_mut() {
// never send transactions to the light node
if matches!(peer.role, ObservedRole::Light) {
continue;
}
let (hashes, to_send): (Vec<_>, Vec<_>) = transactions
.iter()
.filter(|&(ref hash, _)| peer.known_transactions.insert(hash.clone()))
.cloned()
.unzip();
propagated_transactions += hashes.len();
if !to_send.is_empty() {
for hash in hashes {
propagated_to
.entry(hash)
.or_default()
.push(who.to_base58());
}
trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who);
self.service.write_notification(
who.clone(),
self.protocol_name.clone(),
to_send.encode()
);
}
}
if let Some(ref metrics) = self.metrics {
metrics.propagated_transactions.inc_by(propagated_transactions as _)
}
propagated_to
}
/// Call when we must propagate ready transactions to peers.
fn propagate_transactions(&mut self) {
// Accept transactions only when enabled
if !self.gossip_enabled.load(Ordering::Relaxed) {
return;
}
debug!(target: "sync", "Propagating transactions");
let transactions = self.transaction_pool.transactions();
let propagated_to = self.do_propagate_transactions(&transactions);
self.transaction_pool.on_broadcasted(propagated_to);
}
}
| 34.016327 | 104 | 0.703024 |
9cb0a2972c3f83bc83470ef22b6befe558ea1b4b | 3,347 | //! This module contains the definition of a "StringSet" a set of
//! logical strings and the code to create them from record batches.
use std::{collections::BTreeSet, sync::Arc};
use arrow::{datatypes::SchemaRef, record_batch::RecordBatch};
use arrow_deps::{
arrow,
arrow::array::{Array, StringArray},
arrow::datatypes::DataType,
};
use snafu::{ensure, OptionExt, Snafu};
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display(
"Error extracting results from Record Batches: schema not a single Utf8: {:?}",
schema
))]
InternalSchemaWasNotString { schema: SchemaRef },
#[snafu(display("Internal error, failed to downcast field to Utf8"))]
InternalFailedToDowncast {},
#[snafu(display("Internal error, unexpected null value"))]
InternalUnexpectedNull {},
#[snafu(display(
"Error reading record batch while converting to StringSet: {:?}",
source
))]
ReadingRecordBatch { source: arrow::error::ArrowError },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
pub type StringSet = BTreeSet<String>;
pub type StringSetRef = Arc<StringSet>;
/// Trait to convert RecordBatch'y things into
/// `StringSetRef`s. Assumes that the input record batches each have a
/// single string column. Can return errors, so don't use
/// `std::convert::From`
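///
/// ```ignore
/// // A small sketch: a string slice converts directly.
/// let set = ["cpu", "mem"].as_ref().into_stringset().unwrap();
/// assert!(set.contains("cpu"));
/// ```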
pub trait IntoStringSet {
/// Convert this thing into a stringset
fn into_stringset(self) -> Result<StringSetRef>;
}
impl IntoStringSet for &[&str] {
fn into_stringset(self) -> Result<StringSetRef> {
let set: StringSet = self.iter().map(|s| s.to_string()).collect();
Ok(Arc::new(set))
}
}
/// Converts record batches into StringSets.
impl IntoStringSet for Vec<RecordBatch> {
fn into_stringset(self) -> Result<StringSetRef> {
let mut strings = StringSet::new();
// process the record batches one by one
for record_batch in self.into_iter() {
let num_rows = record_batch.num_rows();
let schema = record_batch.schema();
let fields = schema.fields();
ensure!(
fields.len() == 1,
InternalSchemaWasNotString {
schema: schema.clone(),
}
);
let field = &fields[0];
ensure!(
field.data_type() == &DataType::Utf8,
InternalSchemaWasNotString {
schema: schema.clone(),
}
);
let array = record_batch
.column(0)
.as_any()
.downcast_ref::<StringArray>()
.context(InternalFailedToDowncast)?;
add_utf8_array_to_stringset(&mut strings, array, num_rows)?;
}
Ok(StringSetRef::new(strings))
}
}
fn add_utf8_array_to_stringset(
dest: &mut StringSet,
src: &StringArray,
num_rows: usize,
) -> Result<()> {
for i in 0..num_rows {
// Not sure how to handle a NULL -- StringSet contains
// Strings, not Option<String>
if src.is_null(i) {
return InternalUnexpectedNull {}.fail();
} else {
let src_value = src.value(i);
if !dest.contains(src_value) {
dest.insert(src_value.into());
}
}
}
Ok(())
}
| 29.619469 | 87 | 0.591575 |
038aee6bf7f85b66743ac0d619eea699cb1726a2 | 656 | #[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
_reserved0: [u8; 1280usize],
#[doc = "0x500 - Unspecified"]
pub vregradio: VREGRADIO,
}
#[doc = r"Register block"]
#[repr(C)]
pub struct VREGRADIO {
#[doc = "0x00 - Request high voltage on RADIO. After requesting high voltage, the user must wait until VREQHREADY is set to Ready"]
pub vreqh: crate::Reg<self::vregradio::vreqh::VREQH_SPEC>,
_reserved1: [u8; 4usize],
#[doc = "0x08 - High voltage on RADIO is ready"]
pub vreqhready: crate::Reg<self::vregradio::vreqhready::VREQHREADY_SPEC>,
}
#[doc = r"Register block"]
#[doc = "Unspecified"]
pub mod vregradio;
| 32.8 | 134 | 0.676829 |
dbc94e5fcb10fbd62e6f1d6cb544cacb402b0f44 | 27,111 | use crate::{
GlobalLightMeta, GpuLights, LightMeta, NotShadowCaster, NotShadowReceiver, ShadowPipeline,
ViewClusterBindings, ViewLightsUniformOffset, ViewShadowBindings,
};
use bevy_app::Plugin;
use bevy_asset::{Assets, Handle, HandleUntyped};
use bevy_ecs::{
prelude::*,
system::{lifetimeless::*, SystemParamItem},
};
use bevy_math::Mat4;
use bevy_reflect::TypeUuid;
use bevy_render::{
mesh::Mesh,
render_asset::RenderAssets,
render_component::{ComponentUniforms, DynamicUniformIndex, UniformComponentPlugin},
render_phase::{EntityRenderCommand, RenderCommandResult, TrackedRenderPass},
render_resource::*,
renderer::{RenderDevice, RenderQueue},
texture::{BevyDefault, GpuImage, Image, TextureFormatPixelInfo},
view::{ComputedVisibility, ViewUniform, ViewUniformOffset, ViewUniforms},
RenderApp, RenderStage,
};
use bevy_transform::components::GlobalTransform;
use crevice::std140::AsStd140;
use wgpu::{
Extent3d, ImageCopyTexture, ImageDataLayout, Origin3d, TextureDimension, TextureFormat,
TextureViewDescriptor,
};
#[derive(Default)]
pub struct MeshRenderPlugin;
pub const MESH_VIEW_BIND_GROUP_HANDLE: HandleUntyped =
HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 9076678235888822571);
pub const MESH_STRUCT_HANDLE: HandleUntyped =
HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 2506024101911992377);
pub const MESH_SHADER_HANDLE: HandleUntyped =
HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 3252377289100772450);
impl Plugin for MeshRenderPlugin {
fn build(&self, app: &mut bevy_app::App) {
let mut shaders = app.world.get_resource_mut::<Assets<Shader>>().unwrap();
shaders.set_untracked(
MESH_SHADER_HANDLE,
Shader::from_wgsl(include_str!("mesh.wgsl")),
);
shaders.set_untracked(
MESH_STRUCT_HANDLE,
Shader::from_wgsl(include_str!("mesh_struct.wgsl"))
.with_import_path("bevy_pbr::mesh_struct"),
);
shaders.set_untracked(
MESH_VIEW_BIND_GROUP_HANDLE,
Shader::from_wgsl(include_str!("mesh_view_bind_group.wgsl"))
.with_import_path("bevy_pbr::mesh_view_bind_group"),
);
app.add_plugin(UniformComponentPlugin::<MeshUniform>::default());
app.sub_app(RenderApp)
.init_resource::<MeshPipeline>()
.add_system_to_stage(RenderStage::Extract, extract_meshes)
.add_system_to_stage(RenderStage::Queue, queue_mesh_bind_group)
.add_system_to_stage(RenderStage::Queue, queue_mesh_view_bind_groups);
}
}
#[derive(Component, AsStd140, Clone)]
pub struct MeshUniform {
pub transform: Mat4,
pub inverse_transpose_model: Mat4,
pub flags: u32,
}
// NOTE: These must match the bit flags in bevy_pbr2/src/render/mesh.wgsl!
bitflags::bitflags! {
#[repr(transparent)]
struct MeshFlags: u32 {
const SHADOW_RECEIVER = (1 << 0);
const NONE = 0;
const UNINITIALIZED = 0xFFFF;
}
}
pub fn extract_meshes(
mut commands: Commands,
mut previous_caster_len: Local<usize>,
mut previous_not_caster_len: Local<usize>,
caster_query: Query<
(
Entity,
&ComputedVisibility,
&GlobalTransform,
&Handle<Mesh>,
Option<&NotShadowReceiver>,
),
Without<NotShadowCaster>,
>,
not_caster_query: Query<
(
Entity,
&ComputedVisibility,
&GlobalTransform,
&Handle<Mesh>,
Option<&NotShadowReceiver>,
),
With<NotShadowCaster>,
>,
) {
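    // `previous_caster_len`/`previous_not_caster_len` are `Local`s that carry the
    // previous frame's counts across system runs, letting the Vecs below be
    // preallocated to approximately the right capacity.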
let mut caster_values = Vec::with_capacity(*previous_caster_len);
for (entity, computed_visibility, transform, handle, not_receiver) in caster_query.iter() {
if !computed_visibility.is_visible {
continue;
}
let transform = transform.compute_matrix();
caster_values.push((
entity,
(
handle.clone_weak(),
MeshUniform {
flags: if not_receiver.is_some() {
MeshFlags::empty().bits
} else {
MeshFlags::SHADOW_RECEIVER.bits
},
transform,
inverse_transpose_model: transform.inverse().transpose(),
},
),
));
}
*previous_caster_len = caster_values.len();
commands.insert_or_spawn_batch(caster_values);
let mut not_caster_values = Vec::with_capacity(*previous_not_caster_len);
for (entity, computed_visibility, transform, handle, not_receiver) in not_caster_query.iter() {
if !computed_visibility.is_visible {
continue;
}
let transform = transform.compute_matrix();
not_caster_values.push((
entity,
(
handle.clone_weak(),
MeshUniform {
flags: if not_receiver.is_some() {
MeshFlags::empty().bits
} else {
MeshFlags::SHADOW_RECEIVER.bits
},
transform,
inverse_transpose_model: transform.inverse().transpose(),
},
NotShadowCaster,
),
));
}
*previous_not_caster_len = not_caster_values.len();
commands.insert_or_spawn_batch(not_caster_values);
}
#[derive(Clone)]
pub struct MeshPipeline {
pub view_layout: BindGroupLayout,
pub mesh_layout: BindGroupLayout,
// This dummy white texture is to be used in place of optional StandardMaterial textures
pub dummy_white_gpu_image: GpuImage,
}
impl FromWorld for MeshPipeline {
fn from_world(world: &mut World) -> Self {
let render_device = world.get_resource::<RenderDevice>().unwrap();
let view_layout = render_device.create_bind_group_layout(&BindGroupLayoutDescriptor {
entries: &[
// View
BindGroupLayoutEntry {
binding: 0,
visibility: ShaderStages::VERTEX | ShaderStages::FRAGMENT,
ty: BindingType::Buffer {
ty: BufferBindingType::Uniform,
has_dynamic_offset: true,
min_binding_size: BufferSize::new(ViewUniform::std140_size_static() as u64),
},
count: None,
},
// Lights
BindGroupLayoutEntry {
binding: 1,
visibility: ShaderStages::FRAGMENT,
ty: BindingType::Buffer {
ty: BufferBindingType::Uniform,
has_dynamic_offset: true,
min_binding_size: BufferSize::new(GpuLights::std140_size_static() as u64),
},
count: None,
},
// Point Shadow Texture Cube Array
BindGroupLayoutEntry {
binding: 2,
visibility: ShaderStages::FRAGMENT,
ty: BindingType::Texture {
multisampled: false,
sample_type: TextureSampleType::Depth,
view_dimension: TextureViewDimension::CubeArray,
},
count: None,
},
// Point Shadow Texture Array Sampler
BindGroupLayoutEntry {
binding: 3,
visibility: ShaderStages::FRAGMENT,
ty: BindingType::Sampler {
comparison: true,
filtering: true,
},
count: None,
},
// Directional Shadow Texture Array
BindGroupLayoutEntry {
binding: 4,
visibility: ShaderStages::FRAGMENT,
ty: BindingType::Texture {
multisampled: false,
sample_type: TextureSampleType::Depth,
view_dimension: TextureViewDimension::D2Array,
},
count: None,
},
// Directional Shadow Texture Array Sampler
BindGroupLayoutEntry {
binding: 5,
visibility: ShaderStages::FRAGMENT,
ty: BindingType::Sampler {
comparison: true,
filtering: true,
},
count: None,
},
// PointLights
BindGroupLayoutEntry {
binding: 6,
visibility: ShaderStages::FRAGMENT,
ty: BindingType::Buffer {
ty: BufferBindingType::Uniform,
has_dynamic_offset: false,
// NOTE: Static size for uniform buffers. GpuPointLight has a padded
// size of 128 bytes, so 16384 / 128 = 128 point lights max
min_binding_size: BufferSize::new(16384),
},
count: None,
},
// ClusteredLightIndexLists
BindGroupLayoutEntry {
binding: 7,
visibility: ShaderStages::FRAGMENT,
ty: BindingType::Buffer {
ty: BufferBindingType::Uniform,
has_dynamic_offset: false,
// NOTE: With 128 point lights max, indices need 7 bits. Use u8 for
// convenience.
min_binding_size: BufferSize::new(16384),
},
count: None,
},
// ClusterOffsetsAndCounts
BindGroupLayoutEntry {
binding: 8,
visibility: ShaderStages::FRAGMENT,
ty: BindingType::Buffer {
ty: BufferBindingType::Uniform,
has_dynamic_offset: false,
                        // NOTE: The offset needs to address 16384 indices, which needs 14 bits (2^14 = 16384).
// The count can be at most all 128 lights so 7 bits.
// Pack the offset into the upper 24 bits and the count into the
// lower 8 bits for convenience.
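                        // For illustration: an offset of 5 with a count of 3
                        // would be stored as (5 << 8) | 3 = 0x0000_0503;
                        // unpacking is offset = packed >> 8, count = packed & 0xFF.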
min_binding_size: BufferSize::new(16384),
},
count: None,
},
],
label: Some("mesh_view_layout"),
});
let mesh_layout = render_device.create_bind_group_layout(&BindGroupLayoutDescriptor {
entries: &[BindGroupLayoutEntry {
binding: 0,
visibility: ShaderStages::VERTEX | ShaderStages::FRAGMENT,
ty: BindingType::Buffer {
ty: BufferBindingType::Uniform,
has_dynamic_offset: true,
min_binding_size: BufferSize::new(MeshUniform::std140_size_static() as u64),
},
count: None,
}],
label: Some("mesh_layout"),
});
        // A 1x1x1 'all 1.0' texture to use as a dummy in place of optional StandardMaterial textures
let dummy_white_gpu_image = {
let image = Image::new_fill(
Extent3d::default(),
TextureDimension::D2,
&[255u8; 4],
TextureFormat::bevy_default(),
);
let texture = render_device.create_texture(&image.texture_descriptor);
let sampler = render_device.create_sampler(&image.sampler_descriptor);
let format_size = image.texture_descriptor.format.pixel_size();
let render_queue = world.get_resource_mut::<RenderQueue>().unwrap();
render_queue.write_texture(
ImageCopyTexture {
texture: &texture,
mip_level: 0,
origin: Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
&image.data,
ImageDataLayout {
offset: 0,
bytes_per_row: Some(
std::num::NonZeroU32::new(
image.texture_descriptor.size.width * format_size as u32,
)
.unwrap(),
),
rows_per_image: None,
},
image.texture_descriptor.size,
);
let texture_view = texture.create_view(&TextureViewDescriptor::default());
GpuImage {
texture,
texture_view,
sampler,
}
};
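        // For the 1x1 RGBA8 default above, `format_size` is 4 bytes per pixel,
        // so `bytes_per_row` works out to 1 * 4 = 4 and a single row is uploaded.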
MeshPipeline {
view_layout,
mesh_layout,
dummy_white_gpu_image,
}
}
}
impl MeshPipeline {
pub fn get_image_texture<'a>(
&'a self,
gpu_images: &'a RenderAssets<Image>,
handle_option: &Option<Handle<Image>>,
) -> Option<(&'a TextureView, &'a Sampler)> {
if let Some(handle) = handle_option {
let gpu_image = gpu_images.get(handle)?;
Some((&gpu_image.texture_view, &gpu_image.sampler))
} else {
Some((
&self.dummy_white_gpu_image.texture_view,
&self.dummy_white_gpu_image.sampler,
))
}
}
}
bitflags::bitflags! {
#[repr(transparent)]
    // NOTE: Apparently Quadro drivers support up to 64x MSAA.
/// MSAA uses the highest 6 bits for the MSAA sample count - 1 to support up to 64x MSAA.
pub struct MeshPipelineKey: u32 {
const NONE = 0;
const VERTEX_TANGENTS = (1 << 0);
const TRANSPARENT_MAIN_PASS = (1 << 1);
const MSAA_RESERVED_BITS = MeshPipelineKey::MSAA_MASK_BITS << MeshPipelineKey::MSAA_SHIFT_BITS;
}
}
impl MeshPipelineKey {
const MSAA_MASK_BITS: u32 = 0b111111;
const MSAA_SHIFT_BITS: u32 = 32 - 6;
pub fn from_msaa_samples(msaa_samples: u32) -> Self {
let msaa_bits = ((msaa_samples - 1) & Self::MSAA_MASK_BITS) << Self::MSAA_SHIFT_BITS;
MeshPipelineKey::from_bits(msaa_bits).unwrap()
}
pub fn msaa_samples(&self) -> u32 {
((self.bits >> Self::MSAA_SHIFT_BITS) & Self::MSAA_MASK_BITS) + 1
}
}
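// A quick worked example of the packing above (illustrative values):
// `from_msaa_samples(4)` stores 4 - 1 = 0b11 in the top six bits of the key,
// and `msaa_samples()` reverses it: ((0b11 << 26) >> 26 & 0b111111) + 1 == 4.
// The round trip is exercised for 1..=64 by the test at the bottom of this file.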
impl SpecializedPipeline for MeshPipeline {
type Key = MeshPipelineKey;
fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
let (vertex_array_stride, vertex_attributes) =
if key.contains(MeshPipelineKey::VERTEX_TANGENTS) {
(
48,
vec![
// Position (GOTCHA! Vertex_Position isn't first in the buffer due to how Mesh sorts attributes (alphabetically))
VertexAttribute {
format: VertexFormat::Float32x3,
offset: 12,
shader_location: 0,
},
// Normal
VertexAttribute {
format: VertexFormat::Float32x3,
offset: 0,
shader_location: 1,
},
// Uv (GOTCHA! uv is no longer third in the buffer due to how Mesh sorts attributes (alphabetically))
VertexAttribute {
format: VertexFormat::Float32x2,
offset: 40,
shader_location: 2,
},
// Tangent
VertexAttribute {
format: VertexFormat::Float32x4,
offset: 24,
shader_location: 3,
},
],
)
} else {
(
32,
vec![
// Position (GOTCHA! Vertex_Position isn't first in the buffer due to how Mesh sorts attributes (alphabetically))
VertexAttribute {
format: VertexFormat::Float32x3,
offset: 12,
shader_location: 0,
},
// Normal
VertexAttribute {
format: VertexFormat::Float32x3,
offset: 0,
shader_location: 1,
},
// Uv
VertexAttribute {
format: VertexFormat::Float32x2,
offset: 24,
shader_location: 2,
},
],
)
};
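        // Layout recap (Mesh sorts attributes alphabetically, hence the offsets
        // above): with tangents the buffer is Normal @ 0..12, Position @ 12..24,
        // Tangent @ 24..40, Uv @ 40..48 (stride 48); without tangents it is
        // Normal @ 0..12, Position @ 12..24, Uv @ 24..32 (stride 32).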
let mut shader_defs = Vec::new();
if key.contains(MeshPipelineKey::VERTEX_TANGENTS) {
shader_defs.push(String::from("VERTEX_TANGENTS"));
}
let (label, blend, depth_write_enabled);
if key.contains(MeshPipelineKey::TRANSPARENT_MAIN_PASS) {
label = "transparent_mesh_pipeline".into();
blend = Some(BlendState::ALPHA_BLENDING);
// For the transparent pass, fragments that are closer will be alpha blended
// but their depth is not written to the depth buffer
depth_write_enabled = false;
} else {
label = "opaque_mesh_pipeline".into();
blend = Some(BlendState::REPLACE);
// For the opaque and alpha mask passes, fragments that are closer will replace
// the current fragment value in the output and the depth is written to the
// depth buffer
depth_write_enabled = true;
}
RenderPipelineDescriptor {
vertex: VertexState {
shader: MESH_SHADER_HANDLE.typed::<Shader>(),
entry_point: "vertex".into(),
shader_defs: shader_defs.clone(),
buffers: vec![VertexBufferLayout {
array_stride: vertex_array_stride,
step_mode: VertexStepMode::Vertex,
attributes: vertex_attributes,
}],
},
fragment: Some(FragmentState {
shader: MESH_SHADER_HANDLE.typed::<Shader>(),
shader_defs,
entry_point: "fragment".into(),
targets: vec![ColorTargetState {
format: TextureFormat::bevy_default(),
blend,
write_mask: ColorWrites::ALL,
}],
}),
layout: Some(vec![self.view_layout.clone(), self.mesh_layout.clone()]),
primitive: PrimitiveState {
front_face: FrontFace::Ccw,
cull_mode: Some(Face::Back),
polygon_mode: PolygonMode::Fill,
clamp_depth: false,
conservative: false,
topology: PrimitiveTopology::TriangleList,
strip_index_format: None,
},
depth_stencil: Some(DepthStencilState {
format: TextureFormat::Depth32Float,
depth_write_enabled,
depth_compare: CompareFunction::Greater,
stencil: StencilState {
front: StencilFaceState::IGNORE,
back: StencilFaceState::IGNORE,
read_mask: 0,
write_mask: 0,
},
bias: DepthBiasState {
constant: 0,
slope_scale: 0.0,
clamp: 0.0,
},
}),
multisample: MultisampleState {
count: key.msaa_samples(),
mask: !0,
alpha_to_coverage_enabled: false,
},
label: Some(label),
}
}
}
pub struct MeshBindGroup {
pub value: BindGroup,
}
pub fn queue_mesh_bind_group(
mut commands: Commands,
mesh_pipeline: Res<MeshPipeline>,
render_device: Res<RenderDevice>,
mesh_uniforms: Res<ComponentUniforms<MeshUniform>>,
) {
if let Some(binding) = mesh_uniforms.uniforms().binding() {
commands.insert_resource(MeshBindGroup {
value: render_device.create_bind_group(&BindGroupDescriptor {
entries: &[BindGroupEntry {
binding: 0,
resource: binding,
}],
label: Some("mesh_bind_group"),
layout: &mesh_pipeline.mesh_layout,
}),
});
}
}
#[derive(Component)]
pub struct MeshViewBindGroup {
pub value: BindGroup,
}
#[allow(clippy::too_many_arguments)]
pub fn queue_mesh_view_bind_groups(
mut commands: Commands,
render_device: Res<RenderDevice>,
mesh_pipeline: Res<MeshPipeline>,
shadow_pipeline: Res<ShadowPipeline>,
light_meta: Res<LightMeta>,
global_light_meta: Res<GlobalLightMeta>,
view_uniforms: Res<ViewUniforms>,
mut views: Query<(Entity, &ViewShadowBindings, &ViewClusterBindings)>,
) {
if let (Some(view_binding), Some(light_binding), Some(point_light_binding)) = (
view_uniforms.uniforms.binding(),
light_meta.view_gpu_lights.binding(),
global_light_meta.gpu_point_lights.binding(),
) {
for (entity, view_shadow_bindings, view_cluster_bindings) in views.iter_mut() {
let view_bind_group = render_device.create_bind_group(&BindGroupDescriptor {
entries: &[
BindGroupEntry {
binding: 0,
resource: view_binding.clone(),
},
BindGroupEntry {
binding: 1,
resource: light_binding.clone(),
},
BindGroupEntry {
binding: 2,
resource: BindingResource::TextureView(
&view_shadow_bindings.point_light_depth_texture_view,
),
},
BindGroupEntry {
binding: 3,
resource: BindingResource::Sampler(&shadow_pipeline.point_light_sampler),
},
BindGroupEntry {
binding: 4,
resource: BindingResource::TextureView(
&view_shadow_bindings.directional_light_depth_texture_view,
),
},
BindGroupEntry {
binding: 5,
resource: BindingResource::Sampler(
&shadow_pipeline.directional_light_sampler,
),
},
BindGroupEntry {
binding: 6,
resource: point_light_binding.clone(),
},
BindGroupEntry {
binding: 7,
resource: view_cluster_bindings
.cluster_light_index_lists
.binding()
.unwrap(),
},
BindGroupEntry {
binding: 8,
resource: view_cluster_bindings
.cluster_offsets_and_counts
.binding()
.unwrap(),
},
],
label: Some("mesh_view_bind_group"),
layout: &mesh_pipeline.view_layout,
});
commands.entity(entity).insert(MeshViewBindGroup {
value: view_bind_group,
});
}
}
}
pub struct SetMeshViewBindGroup<const I: usize>;
impl<const I: usize> EntityRenderCommand for SetMeshViewBindGroup<I> {
type Param = SQuery<(
Read<ViewUniformOffset>,
Read<ViewLightsUniformOffset>,
Read<MeshViewBindGroup>,
)>;
#[inline]
fn render<'w>(
view: Entity,
_item: Entity,
view_query: SystemParamItem<'w, '_, Self::Param>,
pass: &mut TrackedRenderPass<'w>,
) -> RenderCommandResult {
let (view_uniform, view_lights, mesh_view_bind_group) = view_query.get(view).unwrap();
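        // The two dynamic offsets below line up with the two
        // `has_dynamic_offset: true` uniforms in the view layout:
        // binding 0 (view) and binding 1 (lights).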
pass.set_bind_group(
I,
&mesh_view_bind_group.value,
&[view_uniform.offset, view_lights.offset],
);
RenderCommandResult::Success
}
}
pub struct SetMeshBindGroup<const I: usize>;
impl<const I: usize> EntityRenderCommand for SetMeshBindGroup<I> {
type Param = (
SRes<MeshBindGroup>,
SQuery<Read<DynamicUniformIndex<MeshUniform>>>,
);
#[inline]
fn render<'w>(
_view: Entity,
item: Entity,
(mesh_bind_group, mesh_query): SystemParamItem<'w, '_, Self::Param>,
pass: &mut TrackedRenderPass<'w>,
) -> RenderCommandResult {
let mesh_index = mesh_query.get(item).unwrap();
pass.set_bind_group(
I,
&mesh_bind_group.into_inner().value,
&[mesh_index.index()],
);
RenderCommandResult::Success
}
}
pub struct DrawMesh;
impl EntityRenderCommand for DrawMesh {
type Param = (SRes<RenderAssets<Mesh>>, SQuery<Read<Handle<Mesh>>>);
#[inline]
fn render<'w>(
_view: Entity,
item: Entity,
(meshes, mesh_query): SystemParamItem<'w, '_, Self::Param>,
pass: &mut TrackedRenderPass<'w>,
) -> RenderCommandResult {
let mesh_handle = mesh_query.get(item).unwrap();
if let Some(gpu_mesh) = meshes.into_inner().get(mesh_handle) {
pass.set_vertex_buffer(0, gpu_mesh.vertex_buffer.slice(..));
if let Some(index_info) = &gpu_mesh.index_info {
pass.set_index_buffer(index_info.buffer.slice(..), 0, index_info.index_format);
pass.draw_indexed(0..index_info.count, 0, 0..1);
} else {
panic!("non-indexed drawing not supported yet")
}
RenderCommandResult::Success
} else {
RenderCommandResult::Failure
}
}
}
#[cfg(test)]
mod tests {
use super::MeshPipelineKey;
#[test]
fn mesh_key_msaa_samples() {
for i in 1..=64 {
assert_eq!(MeshPipelineKey::from_msaa_samples(i).msaa_samples(), i);
}
}
}
| 37.549861 | 137 | 0.516728 |
11d1a3b82efc1c6e35540bf543255c0eb747f2bd | 27,624 | #![cfg_attr(not(super_unstable), allow(dead_code))]
use std::fmt;
use std::iter;
use std::panic::{self, PanicInfo};
#[cfg(super_unstable)]
use std::path::PathBuf;
use std::str::FromStr;
use fallback;
use proc_macro;
use {Delimiter, Punct, Spacing, TokenTree};
#[derive(Clone)]
pub enum TokenStream {
Compiler(proc_macro::TokenStream),
Fallback(fallback::TokenStream),
}
pub enum LexError {
Compiler(proc_macro::LexError),
Fallback(fallback::LexError),
}
fn nightly_works() -> bool {
use std::sync::atomic::*;
use std::sync::Once;
static WORKS: AtomicUsize = ATOMIC_USIZE_INIT;
static INIT: Once = Once::new();
match WORKS.load(Ordering::SeqCst) {
1 => return false,
2 => return true,
_ => {}
}
// Swap in a null panic hook to avoid printing "thread panicked" to stderr,
// then use catch_unwind to determine whether the compiler's proc_macro is
    // working. When proc-macro2 is used from outside of a procedural macro, all
    // of the proc_macro crate's APIs currently panic.
//
// The Once is to prevent the possibility of this ordering:
//
// thread 1 calls take_hook, gets the user's original hook
// thread 1 calls set_hook with the null hook
// thread 2 calls take_hook, thinks null hook is the original hook
// thread 2 calls set_hook with the null hook
// thread 1 calls set_hook with the actual original hook
// thread 2 calls set_hook with what it thinks is the original hook
//
// in which the user's hook has been lost.
//
// There is still a race condition where a panic in a different thread can
// happen during the interval that the user's original panic hook is
// unregistered such that their hook is incorrectly not called. This is
// sufficiently unlikely and less bad than printing panic messages to stderr
// on correct use of this crate. Maybe there is a libstd feature request
// here. For now, if a user needs to guarantee that this failure mode does
// not occur, they need to call e.g. `proc_macro2::Span::call_site()` from
// the main thread before launching any other threads.
INIT.call_once(|| {
type PanicHook = Fn(&PanicInfo) + Sync + Send + 'static;
let null_hook: Box<PanicHook> = Box::new(|_panic_info| { /* ignore */ });
let sanity_check = &*null_hook as *const PanicHook;
let original_hook = panic::take_hook();
panic::set_hook(null_hook);
let works = panic::catch_unwind(|| proc_macro::Span::call_site()).is_ok();
WORKS.store(works as usize + 1, Ordering::SeqCst);
let hopefully_null_hook = panic::take_hook();
panic::set_hook(original_hook);
if sanity_check != &*hopefully_null_hook {
panic!("observed race condition in proc_macro2::nightly_works");
}
});
nightly_works()
}
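// A minimal sketch of the workaround mentioned above (hypothetical user code,
// not part of this crate): touching the compiler backend once from the main
// thread runs the detection before any other thread can race it.
//
//     fn main() {
//         let _ = proc_macro2::Span::call_site(); // warm up detection
//         // ... spawn worker threads afterwards ...
//     }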
fn mismatch() -> ! {
panic!("stable/nightly mismatch")
}
impl TokenStream {
pub fn new() -> TokenStream {
if nightly_works() {
TokenStream::Compiler(proc_macro::TokenStream::new())
} else {
TokenStream::Fallback(fallback::TokenStream::new())
}
}
pub fn is_empty(&self) -> bool {
match self {
TokenStream::Compiler(tts) => tts.is_empty(),
TokenStream::Fallback(tts) => tts.is_empty(),
}
}
fn unwrap_nightly(self) -> proc_macro::TokenStream {
match self {
TokenStream::Compiler(s) => s,
TokenStream::Fallback(_) => mismatch(),
}
}
fn unwrap_stable(self) -> fallback::TokenStream {
match self {
TokenStream::Compiler(_) => mismatch(),
TokenStream::Fallback(s) => s,
}
}
}
impl FromStr for TokenStream {
type Err = LexError;
fn from_str(src: &str) -> Result<TokenStream, LexError> {
if nightly_works() {
Ok(TokenStream::Compiler(src.parse()?))
} else {
Ok(TokenStream::Fallback(src.parse()?))
}
}
}
impl fmt::Display for TokenStream {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
TokenStream::Compiler(tts) => tts.fmt(f),
TokenStream::Fallback(tts) => tts.fmt(f),
}
}
}
impl From<proc_macro::TokenStream> for TokenStream {
fn from(inner: proc_macro::TokenStream) -> TokenStream {
TokenStream::Compiler(inner)
}
}
impl From<TokenStream> for proc_macro::TokenStream {
fn from(inner: TokenStream) -> proc_macro::TokenStream {
match inner {
TokenStream::Compiler(inner) => inner,
TokenStream::Fallback(inner) => inner.to_string().parse().unwrap(),
}
}
}
impl From<fallback::TokenStream> for TokenStream {
fn from(inner: fallback::TokenStream) -> TokenStream {
TokenStream::Fallback(inner)
}
}
impl From<TokenTree> for TokenStream {
fn from(token: TokenTree) -> TokenStream {
if !nightly_works() {
return TokenStream::Fallback(token.into());
}
let tt: proc_macro::TokenTree = match token {
TokenTree::Group(tt) => tt.inner.unwrap_nightly().into(),
TokenTree::Punct(tt) => {
let spacing = match tt.spacing() {
Spacing::Joint => proc_macro::Spacing::Joint,
Spacing::Alone => proc_macro::Spacing::Alone,
};
let mut op = proc_macro::Punct::new(tt.as_char(), spacing);
op.set_span(tt.span().inner.unwrap_nightly());
op.into()
}
TokenTree::Ident(tt) => tt.inner.unwrap_nightly().into(),
TokenTree::Literal(tt) => tt.inner.unwrap_nightly().into(),
};
TokenStream::Compiler(tt.into())
}
}
impl iter::FromIterator<TokenTree> for TokenStream {
fn from_iter<I: IntoIterator<Item = TokenTree>>(trees: I) -> Self {
if nightly_works() {
let trees = trees
.into_iter()
.map(TokenStream::from)
.flat_map(|t| match t {
TokenStream::Compiler(s) => s,
TokenStream::Fallback(_) => mismatch(),
});
TokenStream::Compiler(trees.collect())
} else {
TokenStream::Fallback(trees.into_iter().collect())
}
}
}
impl iter::FromIterator<TokenStream> for TokenStream {
fn from_iter<I: IntoIterator<Item = TokenStream>>(streams: I) -> Self {
let mut streams = streams.into_iter();
match streams.next() {
#[cfg(slow_extend)]
Some(TokenStream::Compiler(first)) => {
let stream = iter::once(first)
.chain(streams.map(|s| match s {
TokenStream::Compiler(s) => s,
TokenStream::Fallback(_) => mismatch(),
}))
.collect();
TokenStream::Compiler(stream)
}
#[cfg(not(slow_extend))]
Some(TokenStream::Compiler(mut first)) => {
first.extend(streams.map(|s| match s {
TokenStream::Compiler(s) => s,
TokenStream::Fallback(_) => mismatch(),
}));
TokenStream::Compiler(first)
}
Some(TokenStream::Fallback(mut first)) => {
first.extend(streams.map(|s| match s {
TokenStream::Fallback(s) => s,
TokenStream::Compiler(_) => mismatch(),
}));
TokenStream::Fallback(first)
}
None => TokenStream::new(),
}
}
}
impl Extend<TokenTree> for TokenStream {
fn extend<I: IntoIterator<Item = TokenTree>>(&mut self, streams: I) {
match self {
TokenStream::Compiler(tts) => {
#[cfg(not(slow_extend))]
{
tts.extend(
streams
.into_iter()
.map(|t| TokenStream::from(t).unwrap_nightly()),
);
}
#[cfg(slow_extend)]
{
*tts =
tts.clone()
.into_iter()
.chain(streams.into_iter().map(TokenStream::from).flat_map(
|t| match t {
TokenStream::Compiler(tts) => tts.into_iter(),
_ => mismatch(),
},
))
.collect();
}
}
TokenStream::Fallback(tts) => tts.extend(streams),
}
}
}
impl Extend<TokenStream> for TokenStream {
fn extend<I: IntoIterator<Item = TokenStream>>(&mut self, streams: I) {
match self {
TokenStream::Compiler(tts) => {
#[cfg(not(slow_extend))]
{
tts.extend(streams.into_iter().map(|stream| stream.unwrap_nightly()));
}
#[cfg(slow_extend)]
{
*tts = tts
.clone()
.into_iter()
.chain(streams.into_iter().flat_map(|t| match t {
TokenStream::Compiler(tts) => tts.into_iter(),
_ => mismatch(),
}))
.collect();
}
}
TokenStream::Fallback(tts) => {
tts.extend(streams.into_iter().map(|stream| stream.unwrap_stable()))
}
}
}
}
impl fmt::Debug for TokenStream {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
TokenStream::Compiler(tts) => tts.fmt(f),
TokenStream::Fallback(tts) => tts.fmt(f),
}
}
}
impl From<proc_macro::LexError> for LexError {
fn from(e: proc_macro::LexError) -> LexError {
LexError::Compiler(e)
}
}
impl From<fallback::LexError> for LexError {
fn from(e: fallback::LexError) -> LexError {
LexError::Fallback(e)
}
}
impl fmt::Debug for LexError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
LexError::Compiler(e) => e.fmt(f),
LexError::Fallback(e) => e.fmt(f),
}
}
}
pub enum TokenTreeIter {
Compiler(proc_macro::token_stream::IntoIter),
Fallback(fallback::TokenTreeIter),
}
impl IntoIterator for TokenStream {
type Item = TokenTree;
type IntoIter = TokenTreeIter;
fn into_iter(self) -> TokenTreeIter {
match self {
TokenStream::Compiler(tts) => TokenTreeIter::Compiler(tts.into_iter()),
TokenStream::Fallback(tts) => TokenTreeIter::Fallback(tts.into_iter()),
}
}
}
impl Iterator for TokenTreeIter {
type Item = TokenTree;
fn next(&mut self) -> Option<TokenTree> {
let token = match self {
TokenTreeIter::Compiler(iter) => iter.next()?,
TokenTreeIter::Fallback(iter) => return iter.next(),
};
Some(match token {
proc_macro::TokenTree::Group(tt) => ::Group::_new(Group::Compiler(tt)).into(),
proc_macro::TokenTree::Punct(tt) => {
let spacing = match tt.spacing() {
proc_macro::Spacing::Joint => Spacing::Joint,
proc_macro::Spacing::Alone => Spacing::Alone,
};
let mut o = Punct::new(tt.as_char(), spacing);
o.set_span(::Span::_new(Span::Compiler(tt.span())));
o.into()
}
proc_macro::TokenTree::Ident(s) => ::Ident::_new(Ident::Compiler(s)).into(),
proc_macro::TokenTree::Literal(l) => ::Literal::_new(Literal::Compiler(l)).into(),
})
}
fn size_hint(&self) -> (usize, Option<usize>) {
match self {
TokenTreeIter::Compiler(tts) => tts.size_hint(),
TokenTreeIter::Fallback(tts) => tts.size_hint(),
}
}
}
impl fmt::Debug for TokenTreeIter {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("TokenTreeIter").finish()
}
}
#[derive(Clone, PartialEq, Eq)]
#[cfg(super_unstable)]
pub enum SourceFile {
Compiler(proc_macro::SourceFile),
Fallback(fallback::SourceFile),
}
#[cfg(super_unstable)]
impl SourceFile {
fn nightly(sf: proc_macro::SourceFile) -> Self {
SourceFile::Compiler(sf)
}
/// Get the path to this source file as a string.
pub fn path(&self) -> PathBuf {
match self {
SourceFile::Compiler(a) => a.path(),
SourceFile::Fallback(a) => a.path(),
}
}
pub fn is_real(&self) -> bool {
match self {
SourceFile::Compiler(a) => a.is_real(),
SourceFile::Fallback(a) => a.is_real(),
}
}
}
#[cfg(super_unstable)]
impl fmt::Debug for SourceFile {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
SourceFile::Compiler(a) => a.fmt(f),
SourceFile::Fallback(a) => a.fmt(f),
}
}
}
pub struct LineColumn {
pub line: usize,
pub column: usize,
}
#[derive(Copy, Clone)]
pub enum Span {
Compiler(proc_macro::Span),
Fallback(fallback::Span),
}
impl Span {
pub fn call_site() -> Span {
if nightly_works() {
Span::Compiler(proc_macro::Span::call_site())
} else {
Span::Fallback(fallback::Span::call_site())
}
}
#[cfg(super_unstable)]
pub fn def_site() -> Span {
if nightly_works() {
Span::Compiler(proc_macro::Span::def_site())
} else {
Span::Fallback(fallback::Span::def_site())
}
}
#[cfg(super_unstable)]
pub fn resolved_at(&self, other: Span) -> Span {
match (self, other) {
(Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.resolved_at(b)),
(Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.resolved_at(b)),
_ => mismatch(),
}
}
#[cfg(super_unstable)]
pub fn located_at(&self, other: Span) -> Span {
match (self, other) {
(Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.located_at(b)),
(Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.located_at(b)),
_ => mismatch(),
}
}
pub fn unwrap(self) -> proc_macro::Span {
match self {
Span::Compiler(s) => s,
Span::Fallback(_) => panic!("proc_macro::Span is only available in procedural macros"),
}
}
#[cfg(super_unstable)]
pub fn source_file(&self) -> SourceFile {
match self {
Span::Compiler(s) => SourceFile::nightly(s.source_file()),
Span::Fallback(s) => SourceFile::Fallback(s.source_file()),
}
}
#[cfg(super_unstable)]
pub fn start(&self) -> LineColumn {
match self {
Span::Compiler(s) => {
let proc_macro::LineColumn { line, column } = s.start();
LineColumn { line, column }
}
Span::Fallback(s) => {
let fallback::LineColumn { line, column } = s.start();
LineColumn { line, column }
}
}
}
#[cfg(super_unstable)]
pub fn end(&self) -> LineColumn {
match self {
Span::Compiler(s) => {
let proc_macro::LineColumn { line, column } = s.end();
LineColumn { line, column }
}
Span::Fallback(s) => {
let fallback::LineColumn { line, column } = s.end();
LineColumn { line, column }
}
}
}
#[cfg(super_unstable)]
pub fn join(&self, other: Span) -> Option<Span> {
let ret = match (self, other) {
(Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.join(b)?),
(Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.join(b)?),
_ => return None,
};
Some(ret)
}
#[cfg(super_unstable)]
pub fn eq(&self, other: &Span) -> bool {
match (self, other) {
(Span::Compiler(a), Span::Compiler(b)) => a.eq(b),
(Span::Fallback(a), Span::Fallback(b)) => a.eq(b),
_ => false,
}
}
fn unwrap_nightly(self) -> proc_macro::Span {
match self {
Span::Compiler(s) => s,
Span::Fallback(_) => mismatch(),
}
}
}
impl From<proc_macro::Span> for ::Span {
fn from(proc_span: proc_macro::Span) -> ::Span {
::Span::_new(Span::Compiler(proc_span))
}
}
impl From<fallback::Span> for Span {
fn from(inner: fallback::Span) -> Span {
Span::Fallback(inner)
}
}
impl fmt::Debug for Span {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Span::Compiler(s) => s.fmt(f),
Span::Fallback(s) => s.fmt(f),
}
}
}
pub fn debug_span_field_if_nontrivial(debug: &mut fmt::DebugStruct, span: Span) {
match span {
Span::Compiler(s) => {
debug.field("span", &s);
}
Span::Fallback(s) => fallback::debug_span_field_if_nontrivial(debug, s),
}
}
#[derive(Clone)]
pub enum Group {
Compiler(proc_macro::Group),
Fallback(fallback::Group),
}
impl Group {
pub fn new(delimiter: Delimiter, stream: TokenStream) -> Group {
match stream {
TokenStream::Compiler(stream) => {
let delimiter = match delimiter {
Delimiter::Parenthesis => proc_macro::Delimiter::Parenthesis,
Delimiter::Bracket => proc_macro::Delimiter::Bracket,
Delimiter::Brace => proc_macro::Delimiter::Brace,
Delimiter::None => proc_macro::Delimiter::None,
};
Group::Compiler(proc_macro::Group::new(delimiter, stream))
}
TokenStream::Fallback(stream) => {
Group::Fallback(fallback::Group::new(delimiter, stream))
}
}
}
pub fn delimiter(&self) -> Delimiter {
match self {
Group::Compiler(g) => match g.delimiter() {
proc_macro::Delimiter::Parenthesis => Delimiter::Parenthesis,
proc_macro::Delimiter::Bracket => Delimiter::Bracket,
proc_macro::Delimiter::Brace => Delimiter::Brace,
proc_macro::Delimiter::None => Delimiter::None,
},
Group::Fallback(g) => g.delimiter(),
}
}
pub fn stream(&self) -> TokenStream {
match self {
Group::Compiler(g) => TokenStream::Compiler(g.stream()),
Group::Fallback(g) => TokenStream::Fallback(g.stream()),
}
}
pub fn span(&self) -> Span {
match self {
Group::Compiler(g) => Span::Compiler(g.span()),
Group::Fallback(g) => Span::Fallback(g.span()),
}
}
#[cfg(super_unstable)]
pub fn span_open(&self) -> Span {
match self {
Group::Compiler(g) => Span::Compiler(g.span_open()),
Group::Fallback(g) => Span::Fallback(g.span_open()),
}
}
#[cfg(super_unstable)]
pub fn span_close(&self) -> Span {
match self {
Group::Compiler(g) => Span::Compiler(g.span_close()),
Group::Fallback(g) => Span::Fallback(g.span_close()),
}
}
pub fn set_span(&mut self, span: Span) {
match (self, span) {
(Group::Compiler(g), Span::Compiler(s)) => g.set_span(s),
(Group::Fallback(g), Span::Fallback(s)) => g.set_span(s),
_ => mismatch(),
}
}
fn unwrap_nightly(self) -> proc_macro::Group {
match self {
Group::Compiler(g) => g,
Group::Fallback(_) => mismatch(),
}
}
}
impl From<fallback::Group> for Group {
fn from(g: fallback::Group) -> Self {
Group::Fallback(g)
}
}
impl fmt::Display for Group {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match self {
Group::Compiler(group) => group.fmt(formatter),
Group::Fallback(group) => group.fmt(formatter),
}
}
}
impl fmt::Debug for Group {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match self {
Group::Compiler(group) => group.fmt(formatter),
Group::Fallback(group) => group.fmt(formatter),
}
}
}
#[derive(Clone)]
pub enum Ident {
Compiler(proc_macro::Ident),
Fallback(fallback::Ident),
}
impl Ident {
pub fn new(string: &str, span: Span) -> Ident {
match span {
Span::Compiler(s) => Ident::Compiler(proc_macro::Ident::new(string, s)),
Span::Fallback(s) => Ident::Fallback(fallback::Ident::new(string, s)),
}
}
pub fn new_raw(string: &str, span: Span) -> Ident {
match span {
Span::Compiler(s) => {
let p: proc_macro::TokenStream = string.parse().unwrap();
let ident = match p.into_iter().next() {
Some(proc_macro::TokenTree::Ident(mut i)) => {
i.set_span(s);
i
}
_ => panic!(),
};
Ident::Compiler(ident)
}
Span::Fallback(s) => Ident::Fallback(fallback::Ident::new_raw(string, s)),
}
}
pub fn span(&self) -> Span {
match self {
Ident::Compiler(t) => Span::Compiler(t.span()),
Ident::Fallback(t) => Span::Fallback(t.span()),
}
}
pub fn set_span(&mut self, span: Span) {
match (self, span) {
(Ident::Compiler(t), Span::Compiler(s)) => t.set_span(s),
(Ident::Fallback(t), Span::Fallback(s)) => t.set_span(s),
_ => mismatch(),
}
}
fn unwrap_nightly(self) -> proc_macro::Ident {
match self {
Ident::Compiler(s) => s,
Ident::Fallback(_) => mismatch(),
}
}
}
impl PartialEq for Ident {
fn eq(&self, other: &Ident) -> bool {
match (self, other) {
(Ident::Compiler(t), Ident::Compiler(o)) => t.to_string() == o.to_string(),
(Ident::Fallback(t), Ident::Fallback(o)) => t == o,
_ => mismatch(),
}
}
}
impl<T> PartialEq<T> for Ident
where
T: ?Sized + AsRef<str>,
{
fn eq(&self, other: &T) -> bool {
let other = other.as_ref();
match self {
Ident::Compiler(t) => t.to_string() == other,
Ident::Fallback(t) => t == other,
}
}
}
impl fmt::Display for Ident {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Ident::Compiler(t) => t.fmt(f),
Ident::Fallback(t) => t.fmt(f),
}
}
}
impl fmt::Debug for Ident {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Ident::Compiler(t) => t.fmt(f),
Ident::Fallback(t) => t.fmt(f),
}
}
}
#[derive(Clone)]
pub enum Literal {
Compiler(proc_macro::Literal),
Fallback(fallback::Literal),
}
macro_rules! suffixed_numbers {
($($name:ident => $kind:ident,)*) => ($(
pub fn $name(n: $kind) -> Literal {
if nightly_works() {
Literal::Compiler(proc_macro::Literal::$name(n))
} else {
Literal::Fallback(fallback::Literal::$name(n))
}
}
)*)
}
macro_rules! unsuffixed_integers {
($($name:ident => $kind:ident,)*) => ($(
pub fn $name(n: $kind) -> Literal {
if nightly_works() {
Literal::Compiler(proc_macro::Literal::$name(n))
} else {
Literal::Fallback(fallback::Literal::$name(n))
}
}
)*)
}
impl Literal {
suffixed_numbers! {
u8_suffixed => u8,
u16_suffixed => u16,
u32_suffixed => u32,
u64_suffixed => u64,
usize_suffixed => usize,
i8_suffixed => i8,
i16_suffixed => i16,
i32_suffixed => i32,
i64_suffixed => i64,
isize_suffixed => isize,
f32_suffixed => f32,
f64_suffixed => f64,
}
#[cfg(u128)]
suffixed_numbers! {
i128_suffixed => i128,
u128_suffixed => u128,
}
unsuffixed_integers! {
u8_unsuffixed => u8,
u16_unsuffixed => u16,
u32_unsuffixed => u32,
u64_unsuffixed => u64,
usize_unsuffixed => usize,
i8_unsuffixed => i8,
i16_unsuffixed => i16,
i32_unsuffixed => i32,
i64_unsuffixed => i64,
isize_unsuffixed => isize,
}
#[cfg(u128)]
unsuffixed_integers! {
i128_unsuffixed => i128,
u128_unsuffixed => u128,
}
pub fn f32_unsuffixed(f: f32) -> Literal {
if nightly_works() {
Literal::Compiler(proc_macro::Literal::f32_unsuffixed(f))
} else {
Literal::Fallback(fallback::Literal::f32_unsuffixed(f))
}
}
pub fn f64_unsuffixed(f: f64) -> Literal {
if nightly_works() {
Literal::Compiler(proc_macro::Literal::f64_unsuffixed(f))
} else {
Literal::Fallback(fallback::Literal::f64_unsuffixed(f))
}
}
pub fn string(t: &str) -> Literal {
if nightly_works() {
Literal::Compiler(proc_macro::Literal::string(t))
} else {
Literal::Fallback(fallback::Literal::string(t))
}
}
pub fn character(t: char) -> Literal {
if nightly_works() {
Literal::Compiler(proc_macro::Literal::character(t))
} else {
Literal::Fallback(fallback::Literal::character(t))
}
}
pub fn byte_string(bytes: &[u8]) -> Literal {
if nightly_works() {
Literal::Compiler(proc_macro::Literal::byte_string(bytes))
} else {
Literal::Fallback(fallback::Literal::byte_string(bytes))
}
}
pub fn span(&self) -> Span {
match self {
Literal::Compiler(lit) => Span::Compiler(lit.span()),
Literal::Fallback(lit) => Span::Fallback(lit.span()),
}
}
pub fn set_span(&mut self, span: Span) {
match (self, span) {
(Literal::Compiler(lit), Span::Compiler(s)) => lit.set_span(s),
(Literal::Fallback(lit), Span::Fallback(s)) => lit.set_span(s),
_ => mismatch(),
}
}
fn unwrap_nightly(self) -> proc_macro::Literal {
match self {
Literal::Compiler(s) => s,
Literal::Fallback(_) => mismatch(),
}
}
}
impl From<fallback::Literal> for Literal {
fn from(s: fallback::Literal) -> Literal {
Literal::Fallback(s)
}
}
impl fmt::Display for Literal {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Literal::Compiler(t) => t.fmt(f),
Literal::Fallback(t) => t.fmt(f),
}
}
}
impl fmt::Debug for Literal {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Literal::Compiler(t) => t.fmt(f),
Literal::Fallback(t) => t.fmt(f),
}
}
}
| 29.960954 | 99 | 0.523385 |
50c0d2fbf7eb633c9f48b7a085f99bc3a5fe155a | 196,605 | #![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
pub mod namespaces {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
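    // AutoRust pairs each operation below with a module of the same name that
    // holds its generated `Error` (and, where needed, `Response`) types.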
pub async fn list_authorization_rules(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
subscription_id: &str,
) -> std::result::Result<SbAuthorizationRuleListResult, list_authorization_rules::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/AuthorizationRules",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_authorization_rules::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_authorization_rules::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_authorization_rules::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_authorization_rules::ResponseBytesError)?;
let rsp_value: SbAuthorizationRuleListResult =
serde_json::from_slice(&body).context(list_authorization_rules::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_authorization_rules::ResponseBytesError)?;
let rsp_value: ErrorResponse =
serde_json::from_slice(&body).context(list_authorization_rules::DeserializeError { body })?;
list_authorization_rules::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_authorization_rules {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
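    // A minimal usage sketch for the function above (hypothetical caller code;
    // assumes an `OperationConfig` built elsewhere in this crate and an async
    // runtime):
    //
    //     let rules = namespaces::list_authorization_rules(
    //         &config, "my-resource-group", "my-namespace", "subscription-id",
    //     ).await?;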
pub async fn get_authorization_rule(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
authorization_rule_name: &str,
subscription_id: &str,
) -> std::result::Result<SbAuthorizationRule, get_authorization_rule::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/AuthorizationRules/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, authorization_rule_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get_authorization_rule::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get_authorization_rule::BuildRequestError)?;
let rsp = client.execute(req).await.context(get_authorization_rule::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get_authorization_rule::ResponseBytesError)?;
let rsp_value: SbAuthorizationRule =
serde_json::from_slice(&body).context(get_authorization_rule::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get_authorization_rule::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get_authorization_rule::DeserializeError { body })?;
get_authorization_rule::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get_authorization_rule {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn create_or_update_authorization_rule(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
authorization_rule_name: &str,
parameters: &SbAuthorizationRule,
subscription_id: &str,
) -> std::result::Result<SbAuthorizationRule, create_or_update_authorization_rule::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/AuthorizationRules/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, authorization_rule_name
);
let mut req_builder = client.put(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(create_or_update_authorization_rule::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder
.build()
.context(create_or_update_authorization_rule::BuildRequestError)?;
let rsp = client
.execute(req)
.await
.context(create_or_update_authorization_rule::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update_authorization_rule::ResponseBytesError)?;
let rsp_value: SbAuthorizationRule =
serde_json::from_slice(&body).context(create_or_update_authorization_rule::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update_authorization_rule::ResponseBytesError)?;
let rsp_value: ErrorResponse =
serde_json::from_slice(&body).context(create_or_update_authorization_rule::DeserializeError { body })?;
create_or_update_authorization_rule::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod create_or_update_authorization_rule {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn delete_authorization_rule(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
authorization_rule_name: &str,
subscription_id: &str,
) -> std::result::Result<delete_authorization_rule::Response, delete_authorization_rule::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/AuthorizationRules/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, authorization_rule_name
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete_authorization_rule::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(delete_authorization_rule::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete_authorization_rule::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(delete_authorization_rule::Response::Ok200),
StatusCode::NO_CONTENT => Ok(delete_authorization_rule::Response::NoContent204),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete_authorization_rule::ResponseBytesError)?;
let rsp_value: ErrorResponse =
serde_json::from_slice(&body).context(delete_authorization_rule::DeserializeError { body })?;
delete_authorization_rule::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod delete_authorization_rule {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn list_keys(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
authorization_rule_name: &str,
subscription_id: &str,
) -> std::result::Result<AccessKeys, list_keys::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/AuthorizationRules/{}/listKeys",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, authorization_rule_name
);
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_keys::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_keys::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_keys::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_keys::ResponseBytesError)?;
let rsp_value: AccessKeys = serde_json::from_slice(&body).context(list_keys::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_keys::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list_keys::DeserializeError { body })?;
list_keys::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_keys {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn regenerate_keys(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
authorization_rule_name: &str,
parameters: &RegenerateAccessKeyParameters,
subscription_id: &str,
) -> std::result::Result<AccessKeys, regenerate_keys::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/AuthorizationRules/{}/regenerateKeys",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, authorization_rule_name
);
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(regenerate_keys::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(regenerate_keys::BuildRequestError)?;
let rsp = client.execute(req).await.context(regenerate_keys::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(regenerate_keys::ResponseBytesError)?;
let rsp_value: AccessKeys = serde_json::from_slice(&body).context(regenerate_keys::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(regenerate_keys::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(regenerate_keys::DeserializeError { body })?;
regenerate_keys::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod regenerate_keys {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn check_name_availability(
operation_config: &crate::OperationConfig,
subscription_id: &str,
parameters: &CheckNameAvailability,
) -> std::result::Result<CheckNameAvailabilityResult, check_name_availability::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.ServiceBus/CheckNameAvailability",
&operation_config.base_path, subscription_id
);
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(check_name_availability::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(check_name_availability::BuildRequestError)?;
let rsp = client.execute(req).await.context(check_name_availability::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(check_name_availability::ResponseBytesError)?;
let rsp_value: CheckNameAvailabilityResult =
serde_json::from_slice(&body).context(check_name_availability::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(check_name_availability::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(check_name_availability::DeserializeError { body })?;
check_name_availability::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod check_name_availability {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn migrate(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
subscription_id: &str,
parameters: &SbNamespaceMigrate,
) -> std::result::Result<(), migrate::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/migrate",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name
);
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(migrate::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(migrate::BuildRequestError)?;
let rsp = client.execute(req).await.context(migrate::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(()),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(migrate::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(migrate::DeserializeError { body })?;
migrate::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod migrate {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<SbNamespaceListResult, list::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.ServiceBus/namespaces",
&operation_config.base_path, subscription_id
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list::BuildRequestError)?;
let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: SbNamespaceListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
list::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn list_by_resource_group(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<SbNamespaceListResult, list_by_resource_group::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces",
&operation_config.base_path, subscription_id, resource_group_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_by_resource_group::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_by_resource_group::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_by_resource_group::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?;
let rsp_value: SbNamespaceListResult =
serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?;
list_by_resource_group::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_by_resource_group {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
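    /// Gets the description of the specified Service Bus namespace.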
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
subscription_id: &str,
) -> std::result::Result<SbNamespace, get::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get::BuildRequestError)?;
let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: SbNamespace = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
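    /// Creates or updates a Service Bus namespace (PUT); 200 and 201 carry the namespace body, while 202 means the request was accepted and is still being processed.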
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
parameters: &SbNamespace,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name
);
let mut req_builder = client.put(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(create_or_update::BuildRequestError)?;
let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: SbNamespace = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
StatusCode::CREATED => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: SbNamespace = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
Ok(create_or_update::Response::Created201(rsp_value))
}
StatusCode::ACCEPTED => Ok(create_or_update::Response::Accepted202),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
create_or_update::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod create_or_update {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200(SbNamespace),
Created201(SbNamespace),
Accepted202,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
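    /// Updates an existing Service Bus namespace (PATCH); as with create_or_update, 202 means the request is still being processed.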
pub async fn update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
parameters: &SbNamespaceUpdateParameters,
subscription_id: &str,
) -> std::result::Result<update::Response, update::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name
);
let mut req_builder = client.patch(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(update::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(update::BuildRequestError)?;
let rsp = client.execute(req).await.context(update::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
let rsp_value: SbNamespace = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
Ok(update::Response::Ok200(rsp_value))
}
StatusCode::CREATED => {
let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
let rsp_value: SbNamespace = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
Ok(update::Response::Created201(rsp_value))
}
StatusCode::ACCEPTED => Ok(update::Response::Accepted202),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
update::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod update {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200(SbNamespace),
Created201(SbNamespace),
Accepted202,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
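    /// Deletes the specified namespace (DELETE); 200, 202, and 204 are all treated as success.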
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(delete::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(delete::Response::Ok200),
StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod delete {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
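    /// Gets the default NetworkRuleSet of the namespace.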
pub async fn get_network_rule_set(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
subscription_id: &str,
) -> std::result::Result<NetworkRuleSet, get_network_rule_set::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/networkRuleSets/default",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get_network_rule_set::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get_network_rule_set::BuildRequestError)?;
let rsp = client.execute(req).await.context(get_network_rule_set::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get_network_rule_set::ResponseBytesError)?;
let rsp_value: NetworkRuleSet = serde_json::from_slice(&body).context(get_network_rule_set::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get_network_rule_set::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get_network_rule_set::DeserializeError { body })?;
get_network_rule_set::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get_network_rule_set {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
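    /// Creates or updates the default NetworkRuleSet of the namespace.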
pub async fn create_or_update_network_rule_set(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
subscription_id: &str,
parameters: &NetworkRuleSet,
) -> std::result::Result<NetworkRuleSet, create_or_update_network_rule_set::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/networkRuleSets/default",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name
);
let mut req_builder = client.put(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(create_or_update_network_rule_set::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(create_or_update_network_rule_set::BuildRequestError)?;
let rsp = client
.execute(req)
.await
.context(create_or_update_network_rule_set::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update_network_rule_set::ResponseBytesError)?;
let rsp_value: NetworkRuleSet =
serde_json::from_slice(&body).context(create_or_update_network_rule_set::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update_network_rule_set::ResponseBytesError)?;
let rsp_value: ErrorResponse =
serde_json::from_slice(&body).context(create_or_update_network_rule_set::DeserializeError { body })?;
create_or_update_network_rule_set::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod create_or_update_network_rule_set {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
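    /// Lists the NetworkRuleSet resources of the namespace.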
pub async fn list_network_rule_sets(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
subscription_id: &str,
) -> std::result::Result<NetworkRuleSetListResult, list_network_rule_sets::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/networkRuleSets",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_network_rule_sets::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_network_rule_sets::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_network_rule_sets::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_network_rule_sets::ResponseBytesError)?;
let rsp_value: NetworkRuleSetListResult =
serde_json::from_slice(&body).context(list_network_rule_sets::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_network_rule_sets::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list_network_rule_sets::DeserializeError { body })?;
list_network_rule_sets::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_network_rule_sets {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
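/// Operations on Service Bus queues and their authorization rules.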
pub mod queues {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
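    /// Lists the authorization rules of a queue.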
pub async fn list_authorization_rules(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
queue_name: &str,
subscription_id: &str,
) -> std::result::Result<SbAuthorizationRuleListResult, list_authorization_rules::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/queues/{}/authorizationRules",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, queue_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_authorization_rules::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_authorization_rules::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_authorization_rules::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_authorization_rules::ResponseBytesError)?;
let rsp_value: SbAuthorizationRuleListResult =
serde_json::from_slice(&body).context(list_authorization_rules::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_authorization_rules::ResponseBytesError)?;
let rsp_value: ErrorResponse =
serde_json::from_slice(&body).context(list_authorization_rules::DeserializeError { body })?;
list_authorization_rules::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_authorization_rules {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
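    /// Gets a single authorization rule of a queue by name.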
pub async fn get_authorization_rule(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
queue_name: &str,
authorization_rule_name: &str,
subscription_id: &str,
) -> std::result::Result<SbAuthorizationRule, get_authorization_rule::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/queues/{}/authorizationRules/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, queue_name, authorization_rule_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get_authorization_rule::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get_authorization_rule::BuildRequestError)?;
let rsp = client.execute(req).await.context(get_authorization_rule::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get_authorization_rule::ResponseBytesError)?;
let rsp_value: SbAuthorizationRule =
serde_json::from_slice(&body).context(get_authorization_rule::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get_authorization_rule::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get_authorization_rule::DeserializeError { body })?;
get_authorization_rule::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get_authorization_rule {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
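    /// Creates or replaces an authorization rule on a queue (PUT).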
pub async fn create_or_update_authorization_rule(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
queue_name: &str,
authorization_rule_name: &str,
parameters: &SbAuthorizationRule,
subscription_id: &str,
) -> std::result::Result<SbAuthorizationRule, create_or_update_authorization_rule::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/queues/{}/authorizationRules/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, queue_name, authorization_rule_name
);
let mut req_builder = client.put(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(create_or_update_authorization_rule::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder
.build()
.context(create_or_update_authorization_rule::BuildRequestError)?;
let rsp = client
.execute(req)
.await
.context(create_or_update_authorization_rule::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update_authorization_rule::ResponseBytesError)?;
let rsp_value: SbAuthorizationRule =
serde_json::from_slice(&body).context(create_or_update_authorization_rule::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update_authorization_rule::ResponseBytesError)?;
let rsp_value: ErrorResponse =
serde_json::from_slice(&body).context(create_or_update_authorization_rule::DeserializeError { body })?;
create_or_update_authorization_rule::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod create_or_update_authorization_rule {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
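    /// Deletes a queue authorization rule; 200 and 204 are both treated as success.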
pub async fn delete_authorization_rule(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
queue_name: &str,
authorization_rule_name: &str,
subscription_id: &str,
) -> std::result::Result<delete_authorization_rule::Response, delete_authorization_rule::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/queues/{}/authorizationRules/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, queue_name, authorization_rule_name
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete_authorization_rule::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(delete_authorization_rule::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete_authorization_rule::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(delete_authorization_rule::Response::Ok200),
StatusCode::NO_CONTENT => Ok(delete_authorization_rule::Response::NoContent204),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete_authorization_rule::ResponseBytesError)?;
let rsp_value: ErrorResponse =
serde_json::from_slice(&body).context(delete_authorization_rule::DeserializeError { body })?;
delete_authorization_rule::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod delete_authorization_rule {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
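    /// Requests the access keys of a queue authorization rule (POST .../ListKeys).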
pub async fn list_keys(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
queue_name: &str,
authorization_rule_name: &str,
subscription_id: &str,
) -> std::result::Result<AccessKeys, list_keys::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/queues/{}/authorizationRules/{}/ListKeys",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, queue_name, authorization_rule_name
);
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_keys::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_keys::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_keys::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_keys::ResponseBytesError)?;
let rsp_value: AccessKeys = serde_json::from_slice(&body).context(list_keys::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_keys::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list_keys::DeserializeError { body })?;
list_keys::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_keys {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
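    /// Regenerates the key selected in `parameters` for a queue authorization rule.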
pub async fn regenerate_keys(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
queue_name: &str,
authorization_rule_name: &str,
parameters: &RegenerateAccessKeyParameters,
subscription_id: &str,
) -> std::result::Result<AccessKeys, regenerate_keys::Error> {
let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/queues/{}/authorizationRules/{}/regenerateKeys",
            &operation_config.base_path, subscription_id, resource_group_name, namespace_name, queue_name, authorization_rule_name
        );
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(regenerate_keys::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(regenerate_keys::BuildRequestError)?;
let rsp = client.execute(req).await.context(regenerate_keys::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(regenerate_keys::ResponseBytesError)?;
let rsp_value: AccessKeys = serde_json::from_slice(&body).context(regenerate_keys::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(regenerate_keys::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(regenerate_keys::DeserializeError { body })?;
regenerate_keys::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod regenerate_keys {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
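    /// Lists the queues of a namespace; `skip` and `top` are passed through as the `$skip`/`$top` query parameters.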
pub async fn list_by_namespace(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
subscription_id: &str,
skip: Option<i64>,
top: Option<i64>,
) -> std::result::Result<SbQueueListResult, list_by_namespace::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/queues",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_by_namespace::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
if let Some(skip) = skip {
req_builder = req_builder.query(&[("$skip", skip)]);
}
if let Some(top) = top {
req_builder = req_builder.query(&[("$top", top)]);
}
let req = req_builder.build().context(list_by_namespace::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_by_namespace::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_namespace::ResponseBytesError)?;
let rsp_value: SbQueueListResult = serde_json::from_slice(&body).context(list_by_namespace::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_namespace::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list_by_namespace::DeserializeError { body })?;
list_by_namespace::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_by_namespace {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
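    /// Gets the description of the specified queue.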
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
queue_name: &str,
subscription_id: &str,
) -> std::result::Result<SbQueue, get::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/queues/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, queue_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get::BuildRequestError)?;
let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: SbQueue = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
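    /// Creates or updates a queue in the namespace (PUT).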
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
queue_name: &str,
parameters: &SbQueue,
subscription_id: &str,
) -> std::result::Result<SbQueue, create_or_update::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/queues/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, queue_name
);
let mut req_builder = client.put(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(create_or_update::BuildRequestError)?;
let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: SbQueue = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
create_or_update::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod create_or_update {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
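    /// Deletes the specified queue; 200 and 204 are both treated as success.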
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
queue_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/queues/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, queue_name
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(delete::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(delete::Response::Ok200),
StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod delete {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
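/// Operations on Service Bus topics and their authorization rules.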
pub mod topics {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
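    /// Lists the authorization rules of a topic.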
pub async fn list_authorization_rules(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
topic_name: &str,
subscription_id: &str,
) -> std::result::Result<SbAuthorizationRuleListResult, list_authorization_rules::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/topics/{}/authorizationRules",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, topic_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_authorization_rules::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_authorization_rules::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_authorization_rules::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_authorization_rules::ResponseBytesError)?;
let rsp_value: SbAuthorizationRuleListResult =
serde_json::from_slice(&body).context(list_authorization_rules::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_authorization_rules::ResponseBytesError)?;
let rsp_value: ErrorResponse =
serde_json::from_slice(&body).context(list_authorization_rules::DeserializeError { body })?;
list_authorization_rules::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_authorization_rules {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
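    /// Gets a single authorization rule of a topic by name.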
pub async fn get_authorization_rule(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
topic_name: &str,
authorization_rule_name: &str,
subscription_id: &str,
) -> std::result::Result<SbAuthorizationRule, get_authorization_rule::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/topics/{}/authorizationRules/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, topic_name, authorization_rule_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get_authorization_rule::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get_authorization_rule::BuildRequestError)?;
let rsp = client.execute(req).await.context(get_authorization_rule::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get_authorization_rule::ResponseBytesError)?;
let rsp_value: SbAuthorizationRule =
serde_json::from_slice(&body).context(get_authorization_rule::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get_authorization_rule::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get_authorization_rule::DeserializeError { body })?;
get_authorization_rule::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get_authorization_rule {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
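    /// Creates or replaces an authorization rule on a topic (PUT).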
pub async fn create_or_update_authorization_rule(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
topic_name: &str,
authorization_rule_name: &str,
parameters: &SbAuthorizationRule,
subscription_id: &str,
) -> std::result::Result<SbAuthorizationRule, create_or_update_authorization_rule::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/topics/{}/authorizationRules/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, topic_name, authorization_rule_name
);
let mut req_builder = client.put(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(create_or_update_authorization_rule::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder
.build()
.context(create_or_update_authorization_rule::BuildRequestError)?;
let rsp = client
.execute(req)
.await
.context(create_or_update_authorization_rule::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update_authorization_rule::ResponseBytesError)?;
let rsp_value: SbAuthorizationRule =
serde_json::from_slice(&body).context(create_or_update_authorization_rule::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update_authorization_rule::ResponseBytesError)?;
let rsp_value: ErrorResponse =
serde_json::from_slice(&body).context(create_or_update_authorization_rule::DeserializeError { body })?;
create_or_update_authorization_rule::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod create_or_update_authorization_rule {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
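    /// Deletes a topic authorization rule; 200 and 204 are both treated as success.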
pub async fn delete_authorization_rule(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
topic_name: &str,
authorization_rule_name: &str,
subscription_id: &str,
) -> std::result::Result<delete_authorization_rule::Response, delete_authorization_rule::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/topics/{}/authorizationRules/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, topic_name, authorization_rule_name
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete_authorization_rule::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(delete_authorization_rule::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete_authorization_rule::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(delete_authorization_rule::Response::Ok200),
StatusCode::NO_CONTENT => Ok(delete_authorization_rule::Response::NoContent204),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete_authorization_rule::ResponseBytesError)?;
let rsp_value: ErrorResponse =
serde_json::from_slice(&body).context(delete_authorization_rule::DeserializeError { body })?;
delete_authorization_rule::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod delete_authorization_rule {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
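    /// Requests the access keys of a topic authorization rule (POST .../ListKeys).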
pub async fn list_keys(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
topic_name: &str,
authorization_rule_name: &str,
subscription_id: &str,
) -> std::result::Result<AccessKeys, list_keys::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/topics/{}/authorizationRules/{}/ListKeys",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, topic_name, authorization_rule_name
);
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_keys::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_keys::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_keys::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_keys::ResponseBytesError)?;
let rsp_value: AccessKeys = serde_json::from_slice(&body).context(list_keys::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_keys::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list_keys::DeserializeError { body })?;
list_keys::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_keys {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
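    /// Regenerates the key selected in `parameters` for a topic authorization rule.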
pub async fn regenerate_keys(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
topic_name: &str,
authorization_rule_name: &str,
parameters: &RegenerateAccessKeyParameters,
subscription_id: &str,
) -> std::result::Result<AccessKeys, regenerate_keys::Error> {
let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/topics/{}/authorizationRules/{}/regenerateKeys",
            &operation_config.base_path, subscription_id, resource_group_name, namespace_name, topic_name, authorization_rule_name
        );
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(regenerate_keys::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(regenerate_keys::BuildRequestError)?;
let rsp = client.execute(req).await.context(regenerate_keys::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(regenerate_keys::ResponseBytesError)?;
let rsp_value: AccessKeys = serde_json::from_slice(&body).context(regenerate_keys::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(regenerate_keys::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(regenerate_keys::DeserializeError { body })?;
regenerate_keys::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod regenerate_keys {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
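    /// Lists the topics of a namespace; `skip` and `top` are passed through as the `$skip`/`$top` query parameters.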
pub async fn list_by_namespace(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
subscription_id: &str,
skip: Option<i64>,
top: Option<i64>,
) -> std::result::Result<SbTopicListResult, list_by_namespace::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/topics",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_by_namespace::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
if let Some(skip) = skip {
req_builder = req_builder.query(&[("$skip", skip)]);
}
if let Some(top) = top {
req_builder = req_builder.query(&[("$top", top)]);
}
let req = req_builder.build().context(list_by_namespace::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_by_namespace::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_namespace::ResponseBytesError)?;
let rsp_value: SbTopicListResult = serde_json::from_slice(&body).context(list_by_namespace::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_namespace::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list_by_namespace::DeserializeError { body })?;
list_by_namespace::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_by_namespace {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
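    /// Returns the description of the specified topic.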
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
topic_name: &str,
subscription_id: &str,
) -> std::result::Result<SbTopic, get::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/topics/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, topic_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get::BuildRequestError)?;
let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: SbTopic = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
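    /// Creates a topic in the specified namespace, or updates an existing one (HTTP PUT).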
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
topic_name: &str,
parameters: &SbTopic,
subscription_id: &str,
) -> std::result::Result<SbTopic, create_or_update::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/topics/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, topic_name
);
let mut req_builder = client.put(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(create_or_update::BuildRequestError)?;
let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: SbTopic = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
create_or_update::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod create_or_update {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
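    /// Deletes the topic from the specified namespace; succeeds on 200 or 204.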
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
topic_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/topics/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, topic_name
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(delete::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(delete::Response::Ok200),
StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod delete {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
pub mod disaster_recovery_configs {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
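    /// Checks whether the given name is available for a disaster recovery (Geo-DR) alias.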
pub async fn check_name_availability(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
subscription_id: &str,
parameters: &CheckNameAvailability,
) -> std::result::Result<CheckNameAvailabilityResult, check_name_availability::Error> {
let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/disasterRecoveryConfigs/CheckNameAvailability",
            &operation_config.base_path, subscription_id, resource_group_name, namespace_name
        );
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(check_name_availability::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(check_name_availability::BuildRequestError)?;
let rsp = client.execute(req).await.context(check_name_availability::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(check_name_availability::ResponseBytesError)?;
let rsp_value: CheckNameAvailabilityResult =
serde_json::from_slice(&body).context(check_name_availability::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(check_name_availability::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(check_name_availability::DeserializeError { body })?;
check_name_availability::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod check_name_availability {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
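    /// Lists all disaster recovery (Geo-DR) aliases configured for the namespace.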
pub async fn list(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
subscription_id: &str,
) -> std::result::Result<ArmDisasterRecoveryListResult, list::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/disasterRecoveryConfigs",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list::BuildRequestError)?;
let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: ArmDisasterRecoveryListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
list::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
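    /// Retrieves the disaster recovery alias configuration for the namespace.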
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
alias: &str,
subscription_id: &str,
) -> std::result::Result<ArmDisasterRecovery, get::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/disasterRecoveryConfigs/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, alias
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get::BuildRequestError)?;
let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: ArmDisasterRecovery = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
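    /// Creates or updates a disaster recovery alias; a 201 response carries no body.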
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
alias: &str,
parameters: &ArmDisasterRecovery,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/disasterRecoveryConfigs/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, alias
);
let mut req_builder = client.put(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(create_or_update::BuildRequestError)?;
let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: ArmDisasterRecovery = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
StatusCode::CREATED => Ok(create_or_update::Response::Created201),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
create_or_update::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod create_or_update {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200(ArmDisasterRecovery),
Created201,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
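    /// Deletes the disaster recovery alias.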
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
alias: &str,
subscription_id: &str,
) -> std::result::Result<(), delete::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/disasterRecoveryConfigs/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, alias
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(delete::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(()),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod delete {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
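    /// POSTs to the alias's `breakPairing` endpoint, which disables Geo-DR and stops replication to the secondary namespace.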
pub async fn break_pairing(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
alias: &str,
subscription_id: &str,
) -> std::result::Result<(), break_pairing::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/disasterRecoveryConfigs/{}/breakPairing",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, alias
);
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(break_pairing::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(break_pairing::BuildRequestError)?;
let rsp = client.execute(req).await.context(break_pairing::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(()),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(break_pairing::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(break_pairing::DeserializeError { body })?;
break_pairing::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod break_pairing {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
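    /// POSTs to the alias's `failover` endpoint, invoking a Geo-DR failover and repointing the alias to the secondary namespace.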
pub async fn fail_over(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
alias: &str,
subscription_id: &str,
) -> std::result::Result<(), fail_over::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/disasterRecoveryConfigs/{}/failover",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, alias
);
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(fail_over::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(fail_over::BuildRequestError)?;
let rsp = client.execute(req).await.context(fail_over::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(()),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(fail_over::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(fail_over::DeserializeError { body })?;
fail_over::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod fail_over {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
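    /// Gets the authorization rules of the disaster recovery alias.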
pub async fn list_authorization_rules(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
alias: &str,
subscription_id: &str,
) -> std::result::Result<SbAuthorizationRuleListResult, list_authorization_rules::Error> {
let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/disasterRecoveryConfigs/{}/AuthorizationRules",
            &operation_config.base_path, subscription_id, resource_group_name, namespace_name, alias
        );
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_authorization_rules::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_authorization_rules::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_authorization_rules::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_authorization_rules::ResponseBytesError)?;
let rsp_value: SbAuthorizationRuleListResult =
serde_json::from_slice(&body).context(list_authorization_rules::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_authorization_rules::ResponseBytesError)?;
let rsp_value: ErrorResponse =
serde_json::from_slice(&body).context(list_authorization_rules::DeserializeError { body })?;
list_authorization_rules::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_authorization_rules {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
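    /// Gets a single authorization rule of the alias by name.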
pub async fn get_authorization_rule(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
alias: &str,
authorization_rule_name: &str,
subscription_id: &str,
) -> std::result::Result<SbAuthorizationRule, get_authorization_rule::Error> {
let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/disasterRecoveryConfigs/{}/AuthorizationRules/{}",
            &operation_config.base_path, subscription_id, resource_group_name, namespace_name, alias, authorization_rule_name
        );
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get_authorization_rule::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get_authorization_rule::BuildRequestError)?;
let rsp = client.execute(req).await.context(get_authorization_rule::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get_authorization_rule::ResponseBytesError)?;
let rsp_value: SbAuthorizationRule =
serde_json::from_slice(&body).context(get_authorization_rule::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get_authorization_rule::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get_authorization_rule::DeserializeError { body })?;
get_authorization_rule::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get_authorization_rule {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
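    /// Gets the access keys (primary/secondary keys and connection strings) for the alias authorization rule.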
pub async fn list_keys(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
alias: &str,
authorization_rule_name: &str,
subscription_id: &str,
) -> std::result::Result<AccessKeys, list_keys::Error> {
let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/disasterRecoveryConfigs/{}/AuthorizationRules/{}/listKeys",
            &operation_config.base_path, subscription_id, resource_group_name, namespace_name, alias, authorization_rule_name
        );
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_keys::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_keys::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_keys::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_keys::ResponseBytesError)?;
let rsp_value: AccessKeys = serde_json::from_slice(&body).context(list_keys::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_keys::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list_keys::DeserializeError { body })?;
list_keys::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_keys {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
pub mod event_hubs {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
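    /// Gets all the Event Hubs in the Service Bus namespace.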
pub async fn list_by_namespace(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
subscription_id: &str,
) -> std::result::Result<EventHubListResult, list_by_namespace::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/eventhubs",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_by_namespace::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_by_namespace::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_by_namespace::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_namespace::ResponseBytesError)?;
let rsp_value: EventHubListResult = serde_json::from_slice(&body).context(list_by_namespace::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_namespace::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list_by_namespace::DeserializeError { body })?;
list_by_namespace::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_by_namespace {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
pub mod migration_configs {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
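    /// Lists all migration configurations of the namespace.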
pub async fn list(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
subscription_id: &str,
) -> std::result::Result<MigrationConfigListResult, list::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/migrationConfigurations",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list::BuildRequestError)?;
let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: MigrationConfigListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
list::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
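    /// Retrieves the migration configuration with the given name.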
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
config_name: &str,
subscription_id: &str,
) -> std::result::Result<MigrationConfigProperties, get::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/migrationConfigurations/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, config_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get::BuildRequestError)?;
let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: MigrationConfigProperties = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
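    /// Creates a migration configuration and starts migrating entities from a Standard to a Premium namespace; a 201 response carries no body.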
pub async fn create_and_start_migration(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
config_name: &str,
parameters: &MigrationConfigProperties,
subscription_id: &str,
) -> std::result::Result<create_and_start_migration::Response, create_and_start_migration::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/migrationConfigurations/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, config_name
);
let mut req_builder = client.put(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(create_and_start_migration::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(create_and_start_migration::BuildRequestError)?;
let rsp = client.execute(req).await.context(create_and_start_migration::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(create_and_start_migration::ResponseBytesError)?;
let rsp_value: MigrationConfigProperties =
serde_json::from_slice(&body).context(create_and_start_migration::DeserializeError { body })?;
Ok(create_and_start_migration::Response::Ok200(rsp_value))
}
StatusCode::CREATED => Ok(create_and_start_migration::Response::Created201),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(create_and_start_migration::ResponseBytesError)?;
let rsp_value: ErrorResponse =
serde_json::from_slice(&body).context(create_and_start_migration::DeserializeError { body })?;
create_and_start_migration::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod create_and_start_migration {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200(MigrationConfigProperties),
Created201,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
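    /// Deletes the migration configuration; succeeds on 200 or 204.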
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
config_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/migrationConfigurations/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, config_name
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(delete::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(delete::Response::Ok200),
StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod delete {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
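    /// POSTs to the configuration's `upgrade` endpoint, completing the migration by pointing connection strings at the Premium namespace.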
pub async fn complete_migration(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
config_name: &str,
subscription_id: &str,
) -> std::result::Result<(), complete_migration::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/migrationConfigurations/{}/upgrade",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, config_name
);
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(complete_migration::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(complete_migration::BuildRequestError)?;
let rsp = client.execute(req).await.context(complete_migration::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(()),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(complete_migration::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(complete_migration::DeserializeError { body })?;
complete_migration::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod complete_migration {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
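    /// POSTs to the configuration's `revert` endpoint, reverting an in-progress namespace migration.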
pub async fn revert(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
config_name: &str,
subscription_id: &str,
) -> std::result::Result<(), revert::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/migrationConfigurations/{}/revert",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, config_name
);
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(revert::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(revert::BuildRequestError)?;
let rsp = client.execute(req).await.context(revert::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(()),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(revert::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(revert::DeserializeError { body })?;
revert::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod revert {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
pub mod operations {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
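    /// Lists all available Microsoft.ServiceBus REST API operations.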
pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<OperationListResult, list::Error> {
let client = &operation_config.client;
let uri_str = &format!("{}/providers/Microsoft.ServiceBus/operations", &operation_config.base_path,);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list::BuildRequestError)?;
let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: OperationListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
list::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
pub mod premium_messaging_regions {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
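    /// Gets the premium messaging regions available to the subscription.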
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<PremiumMessagingRegionsListResult, list::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.ServiceBus/premiumMessagingRegions",
&operation_config.base_path, subscription_id
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list::BuildRequestError)?;
let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: PremiumMessagingRegionsListResult =
serde_json::from_slice(&body).context(list::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
list::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
pub mod rules {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
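    /// Lists all rules of the topic subscription; `$skip` and `$top` are optional paging parameters.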
pub async fn list_by_subscriptions(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
topic_name: &str,
subscription_name: &str,
subscription_id: &str,
skip: Option<i64>,
top: Option<i64>,
) -> std::result::Result<RuleListResult, list_by_subscriptions::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/topics/{}/subscriptions/{}/rules",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, topic_name, subscription_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_by_subscriptions::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
if let Some(skip) = skip {
req_builder = req_builder.query(&[("$skip", skip)]);
}
if let Some(top) = top {
req_builder = req_builder.query(&[("$top", top)]);
}
let req = req_builder.build().context(list_by_subscriptions::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_by_subscriptions::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_subscriptions::ResponseBytesError)?;
let rsp_value: RuleListResult = serde_json::from_slice(&body).context(list_by_subscriptions::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_subscriptions::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list_by_subscriptions::DeserializeError { body })?;
list_by_subscriptions::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_by_subscriptions {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
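    /// Retrieves the description of the specified rule.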
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
topic_name: &str,
subscription_name: &str,
rule_name: &str,
subscription_id: &str,
) -> std::result::Result<Rule, get::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/topics/{}/subscriptions/{}/rules/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, topic_name, subscription_name, rule_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get::BuildRequestError)?;
let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: Rule = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
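    /// Creates a rule on the subscription, or updates an existing one (HTTP PUT).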
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
topic_name: &str,
subscription_name: &str,
rule_name: &str,
parameters: &Rule,
subscription_id: &str,
) -> std::result::Result<Rule, create_or_update::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/topics/{}/subscriptions/{}/rules/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, topic_name, subscription_name, rule_name
);
let mut req_builder = client.put(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(create_or_update::BuildRequestError)?;
let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: Rule = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
create_or_update::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod create_or_update {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
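    /// Deletes the rule from the subscription; succeeds on 200 or 204.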
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
topic_name: &str,
subscription_name: &str,
rule_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/topics/{}/subscriptions/{}/rules/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, topic_name, subscription_name, rule_name
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(delete::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(delete::Response::Ok200),
StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod delete {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
pub mod regions {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
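    /// Gets the regions that support the given premium messaging SKU.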
pub async fn list_by_sku(
operation_config: &crate::OperationConfig,
subscription_id: &str,
sku: &str,
) -> std::result::Result<PremiumMessagingRegionsListResult, list_by_sku::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.ServiceBus/sku/{}/regions",
&operation_config.base_path, subscription_id, sku
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_by_sku::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_by_sku::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_by_sku::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_sku::ResponseBytesError)?;
let rsp_value: PremiumMessagingRegionsListResult =
serde_json::from_slice(&body).context(list_by_sku::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_sku::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list_by_sku::DeserializeError { body })?;
list_by_sku::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_by_sku {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
pub mod subscriptions {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
pub async fn list_by_topic(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
topic_name: &str,
subscription_id: &str,
skip: Option<i64>,
top: Option<i64>,
) -> std::result::Result<SbSubscriptionListResult, list_by_topic::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/topics/{}/subscriptions",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, topic_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_by_topic::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
if let Some(skip) = skip {
req_builder = req_builder.query(&[("$skip", skip)]);
}
if let Some(top) = top {
req_builder = req_builder.query(&[("$top", top)]);
}
let req = req_builder.build().context(list_by_topic::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_by_topic::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_topic::ResponseBytesError)?;
let rsp_value: SbSubscriptionListResult =
serde_json::from_slice(&body).context(list_by_topic::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_topic::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list_by_topic::DeserializeError { body })?;
list_by_topic::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_by_topic {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
topic_name: &str,
subscription_name: &str,
subscription_id: &str,
) -> std::result::Result<SbSubscription, get::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/topics/{}/subscriptions/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, topic_name, subscription_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get::BuildRequestError)?;
let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: SbSubscription = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
topic_name: &str,
subscription_name: &str,
parameters: &SbSubscription,
subscription_id: &str,
) -> std::result::Result<SbSubscription, create_or_update::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/topics/{}/subscriptions/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, topic_name, subscription_name
);
let mut req_builder = client.put(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(create_or_update::BuildRequestError)?;
let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: SbSubscription = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
create_or_update::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod create_or_update {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
namespace_name: &str,
topic_name: &str,
subscription_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ServiceBus/namespaces/{}/topics/{}/subscriptions/{}",
&operation_config.base_path, subscription_id, resource_group_name, namespace_name, topic_name, subscription_name
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(delete::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(delete::Response::Ok200),
StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod delete {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
| 42.144695 | 303 | 0.56701 |
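// --- Added usage sketch (not part of the generated file above) ---
// Rough shape of a call into the generated `regions` module; constructing an
// `OperationConfig` is out of scope here, so it is taken as a parameter, and
// the subscription id and sku strings are placeholders.
//
// async fn print_premium_regions(config: &OperationConfig) {
//     match regions::list_by_sku(config, "<subscription-id>", "Premium").await {
//         Ok(_result) => { /* inspect the PremiumMessagingRegionsListResult */ }
//         Err(err) => eprintln!("list_by_sku failed: {:?}", err),
//     }
// }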
d546b027f08069c0795cf2cc289c45763def22e3 | 135 | use crate::state::routing::Route;
#[derive(Clone)]
pub enum RoutingMsg {
ModalOpen(bool),
Navigate(Route),
Push(Route),
}
| 15 | 33 | 0.651852 |
de8762eccbb7e880948fc725d6fbc53e637396e1 | 3,638 | use time;
use std::io::Write;
use std::fmt::Display;
use std::collections::HashMap;
use Priority;
use errors::*;
use facility::Facility;
#[allow(non_camel_case_types)]
#[derive(Copy,Clone)]
pub enum Severity {
LOG_EMERG,
LOG_ALERT,
LOG_CRIT,
LOG_ERR,
LOG_WARNING,
LOG_NOTICE,
LOG_INFO,
LOG_DEBUG
}
pub trait LogFormat<T> {
fn format<W: Write>(&self, w: &mut W, severity: Severity, message: T) -> Result<()>;
fn emerg<W: Write>(&mut self, w: &mut W, message: T) -> Result<()> {
self.format(w, Severity::LOG_EMERG, message)
}
fn alert<W: Write>(&mut self, w: &mut W, message: T) -> Result<()> {
self.format(w, Severity::LOG_ALERT, message)
}
fn crit<W: Write>(&mut self, w: &mut W, message: T) -> Result<()> {
self.format(w, Severity::LOG_CRIT, message)
}
fn err<W: Write>(&mut self, w: &mut W, message: T) -> Result<()> {
self.format(w, Severity::LOG_ERR, message)
}
fn warning<W: Write>(&mut self, w: &mut W, message: T) -> Result<()> {
self.format(w, Severity::LOG_WARNING, message)
}
fn notice<W: Write>(&mut self, w: &mut W, message: T) -> Result<()> {
self.format(w, Severity::LOG_NOTICE, message)
}
fn info<W: Write>(&mut self, w: &mut W, message: T) -> Result<()> {
self.format(w, Severity::LOG_INFO, message)
}
fn debug<W: Write>(&mut self, w: &mut W, message: T) -> Result<()> {
self.format(w, Severity::LOG_DEBUG, message)
}
}
#[derive(Clone,Debug)]
pub struct Formatter3164 {
pub facility: Facility,
pub hostname: Option<String>,
pub process: String,
pub pid: i32,
}
impl<T: Display> LogFormat<T> for Formatter3164 {
fn format<W: Write>(&self, w: &mut W, severity: Severity, message: T) -> Result<()> {
if let Some(ref hostname) = self.hostname {
write!(w, "<{}>{} {} {}[{}]: {}",
encode_priority(severity, self.facility),
time::now().strftime("%b %d %T").unwrap(),
hostname, self.process, self.pid, message).chain_err(|| ErrorKind::Format)
} else {
write!(w, "<{}>{} {}[{}]: {}",
encode_priority(severity, self.facility),
time::now().strftime("%b %d %T").unwrap(),
self.process, self.pid, message).chain_err(|| ErrorKind::Format)
}
}
}
/// RFC 5424 structured data
pub type StructuredData = HashMap<String, HashMap<String, String>>;
#[derive(Clone,Debug)]
pub struct Formatter5424 {
pub facility: Facility,
pub hostname: Option<String>,
pub process: String,
pub pid: i32,
}
impl Formatter5424 {
pub fn format_5424_structured_data(&self, data: StructuredData) -> String {
if data.is_empty() {
"-".to_string()
} else {
let mut res = String::new();
for (id, params) in &data {
res = res + "["+id;
for (name,value) in params {
res = res + " " + name + "=\"" + value + "\"";
}
res += "]";
}
res
}
}
}
impl<T: Display> LogFormat<(i32, StructuredData, T)> for Formatter5424 {
fn format<W: Write>(&self, w: &mut W, severity: Severity, log_message: (i32, StructuredData, T)) -> Result<()> {
let (message_id, data, message) = log_message;
write!(w, "<{}>{} {} {} {} {} {} {} {}",
encode_priority(severity, self.facility),
1, // version
time::now_utc().rfc3339(),
self.hostname.as_ref().map(|x| &x[..]).unwrap_or("localhost"),
self.process, self.pid, message_id,
self.format_5424_structured_data(data), message).chain_err(|| ErrorKind::Format)
}
}
fn encode_priority(severity: Severity, facility: Facility) -> Priority {
facility as u8 | severity as u8
}
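// --- Added illustrative test: a minimal sketch (not part of the original
// file) showing the RFC 5424 structured-data rendering. It assumes the
// `Facility` enum has a `LOG_USER` variant.
#[cfg(test)]
mod format_tests {
    use super::*;

    #[test]
    fn structured_data_is_bracketed() {
        let formatter = Formatter5424 {
            facility: Facility::LOG_USER,
            hostname: None,
            process: "app".to_string(),
            pid: 1,
        };
        // One SD-ID with one parameter renders as `[id key="value"]`.
        let mut params = HashMap::new();
        params.insert("key".to_string(), "value".to_string());
        let mut data: StructuredData = HashMap::new();
        data.insert("id".to_string(), params);
        assert_eq!(
            formatter.format_5424_structured_data(data),
            "[id key=\"value\"]"
        );
        // An empty map renders as the RFC 5424 nil value `-`.
        assert_eq!(formatter.format_5424_structured_data(HashMap::new()), "-");
    }
}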
| 27.984615 | 116 | 0.595932 |
e542986ccc406d0898fd20a7758c2a6ed49e4493 | 713 | // The error bound for comparing floating point numericals, i.e. numericals within this
// error bound are considered equal.
pub const EPSILON: f64 = 1e-6;
// A finer error bound for comparing values with zero, i.e. values with absolute value
// less than this constant are considered zero.
pub const EPSILON_TINY: f64 = 1e-9;
// Represents an invalid id. Note that ids are mostly represented as unsigned ints and longs,
// so this value is usually converted to a very large number (2^32-1 or 2^64-1),
// which is rarely reached and can thus serve as an invalid id.
pub const INVALID_ID: i64 = -1;
// A sufficiently large number representing (positive) infinity.
pub const INFINITY: f64 = 1e20;
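// --- Added usage sketch (not part of the original constants file) ---
// A minimal approximate-equality helper built on the EPSILON bound above.
pub fn approx_eq(a: f64, b: f64) -> bool {
    (a - b).abs() < EPSILON
}

#[cfg(test)]
mod epsilon_tests {
    use super::*;

    #[test]
    fn values_within_epsilon_compare_equal() {
        assert!(approx_eq(1.0, 1.0 + 1e-7)); // difference below EPSILON (1e-6)
        assert!(!approx_eq(1.0, 1.0 + 1e-5)); // difference above EPSILON
    }
}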
| 44.5625 | 93 | 0.758766 |
fcf92d4ef73d3b90d4d06ed3ca22676a8d7555fe | 1,509 | //! Abstractions for reading with digital buttons at
//! a logical level. In this context, a button is "off" when it is in its
//! default state, and "on" after an action has been taken. If a button
//! has no single default state, then logical "on" should correspond with
//! any marking indicators that are present or the expected conventions
//! for the use of the switch.
use digital::DigitalInput;
/// Reads the logical state of a button.
pub trait Btn {
/// Returns true if the button is active.
fn on(&self) -> bool;
/// Returns true if the button is not active.
fn off(&self) -> bool { !self.on() }
}
/// A wrapper for a [DigitalInput](../digital/index.html) that is
/// logically active when the digital input signal is high (true, 1).
pub struct BtnHigh<T: DigitalInput> {
pub pin: T,
}
impl<T: DigitalInput> BtnHigh<T> {
/// Returns a new `BtnHigh` object.
pub const fn new(pin: T) -> Self {
BtnHigh { pin }
}
}
impl<T: DigitalInput> Btn for BtnHigh<T> {
fn on(&self) -> bool {
self.pin.input()
}
}
/// A wrapper for a [DigitalInput](../digital/index.html) that is
/// logically active when the digital input signal is low (zero, 0).
pub struct BtnLow<T: DigitalInput> {
pub pin: T,
}
impl<T: DigitalInput> BtnLow<T> {
/// Returns a new `BtnLow` object.
pub const fn new(pin: T) -> Self {
BtnLow { pin }
}
}
impl<T: DigitalInput> Btn for BtnLow<T> {
fn on(&self) -> bool {
!self.pin.input()
}
} | 27.944444 | 73 | 0.638834 |
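// --- Added usage sketch (not part of the original file above) ---
// Assumes `DigitalInput` only requires `fn input(&self) -> bool`, which is all
// this module calls; `FixedPin` is a hypothetical stand-in for a real pin.
//
// struct FixedPin(bool);
// impl DigitalInput for FixedPin {
//     fn input(&self) -> bool { self.0 }
// }
//
// let stop = BtnLow::new(FixedPin(false));  // active-low: a low signal reads "on"
// assert!(stop.on());
// let run = BtnHigh::new(FixedPin(false));  // active-high: a low signal reads "off"
// assert!(run.off());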
8a6875d6328de4275f7c29bd811895294ed2b09a | 4,175 | //! `rusty_audio` is a convenient sound library for small projects and educational purposes. For
//! more elaborate needs, please use [rodio](https://github.com/tomaka/rodio), which is the much
//! more powerful audio library that this one uses.
//!
//! Example
//! =======
//! ```
//! use rusty_audio::Audio;
//! let mut audio = Audio::new();
//! audio.add("startup", "audio_subsystem_initialized.mp3");
//! audio.play("startup"); // Execution continues while playback occurs in another thread.
//! audio.wait(); // Block until no sounds are playing
//! ```
use rodio::{
source::{Buffered, Source},
Decoder, Sink,
};
use std::collections::HashMap;
use std::fs::File;
use std::io::{Cursor, Read};
pub mod prelude {
pub use crate::Audio;
}
/// A simple 4-track audio system to load/decode audio files from disk to play later. Supported
/// formats are: MP3, WAV, Vorbis and Flac.
#[derive(Default)]
pub struct Audio {
clips: HashMap<&'static str, Buffered<Decoder<Cursor<Vec<u8>>>>>,
channels: Vec<Sink>,
current_channel: usize,
}
impl Audio {
/// Create a new sound subsystem. You only need one of these -- you can use it to load and play
/// any number of audio clips.
pub fn new() -> Self {
let endpoint = rodio::default_output_device().unwrap();
let clips = HashMap::new();
let mut channels: Vec<Sink> = Vec::new();
for _ in 0..4 {
channels.push(Sink::new(&endpoint))
}
Self {
clips,
channels,
current_channel: 0,
}
}
/// Add an audio clip to play. Audio clips will be decoded and buffered during this call so
/// the first call to `.play()` is not staticky if you compile in debug mode. `name` is what
/// you will refer to this clip as when you need to play it. Files known to be supported by the
/// underlying library (rodio) at the time of this writing are MP3, WAV, Vorbis and Flac.
pub fn add(&mut self, name: &'static str, path: &str) {
let mut file_vec: Vec<u8> = Vec::new();
File::open(path)
.expect("Couldn't find audio file to add.")
.read_to_end(&mut file_vec)
.expect("Failed reading in opened audio file.");
let cursor = Cursor::new(file_vec);
let decoder = Decoder::new(cursor).unwrap();
let buffered = decoder.buffered();
// Buffers are lazily decoded, which often leads to static on first play on low-end systems
// or when you compile in debug mode. Since this library is intended for educational
// projects, those are going to be common conditions. So, to optimize for our use-case, we
// will pre-warm all of our audio buffers by forcing things to be decoded and cached right
// now when we first load the file. I would like to find a cleaner way to do this, but the
// following scheme (iterating through a clone and discarding the decoded frames) works
// since clones of a Buffered share the actual decoded data buffer cache by means of Arc and
// Mutex.
let warm = buffered.clone();
for i in warm {
#[allow(clippy::drop_copy)]
drop(i);
}
self.clips.insert(name, buffered);
}
/// Play an audio clip that has already been loaded. `name` is the name you chose when you
/// added the clip to the `Audio` system. If you forgot to load the clip first, this will crash.
pub fn play(&mut self, name: &str) {
let buffer = self.clips.get(name).expect("No clip by that name.").clone();
self.channels[self.current_channel].append(buffer);
self.current_channel += 1;
if self.current_channel >= self.channels.len() {
self.current_channel = 0;
}
}
/// Block until no sounds are playing. Convenient for keeping a thread alive until all sounds
/// have played.
pub fn wait(&self) {
loop {
if self.channels.iter().any(|x| !x.empty()) {
std::thread::sleep(std::time::Duration::from_millis(50));
continue;
}
break;
}
}
}
| 40.931373 | 100 | 0.62012 |
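// --- Added usage sketch (not part of the original file above) ---
// The four internal `Sink` channels let up to four clips overlap; `play`
// rotates through them round-robin. The clip names and file paths below are
// placeholders.
//
// let mut audio = Audio::new();
// audio.add("music", "music.mp3");
// audio.add("blip", "blip.wav");
// audio.play("music");
// audio.play("blip"); // queued on the next channel, overlapping with "music"
// audio.wait();       // block until every channel has drained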
1661cce62d16d867ee17de35d4c11021037bd75c | 3,260 | use miniaudio::{Device, DeviceConfig, DeviceType, SyncDecoder};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
pub fn main() {
let file =
std::fs::File::open("miniaudio/examples/assets/exit.wav").expect("failed to open exit.wav");
let decoder =
SyncDecoder::from_read(file, None).expect("failed to initialize decoder from file");
let mut config = DeviceConfig::new(DeviceType::Playback);
config.playback_mut().set_format(decoder.output_format());
config
.playback_mut()
.set_channels(decoder.output_channels());
config.set_sample_rate(decoder.output_sample_rate());
// This is set to true when a rewind is requested.
let rewind = Arc::new(AtomicBool::new(false));
let playback_rewind = Arc::clone(&rewind);
let playback_decoder = decoder.clone();
config.set_data_callback(move |_device, output, _frames| {
if !playback_rewind.load(Ordering::Acquire) {
let frames = playback_decoder.read_pcm_frames(output);
// If there were no more frames read, request a rewind.
if frames == 0 {
playback_rewind.store(true, Ordering::Release);
}
}
});
config.set_stop_callback(|_device| {
println!("Device Stopped.");
});
let device = Device::new(None, &config).expect("failed to open playback device");
device.start().expect("failed to start device");
println!("Device Backend: {:?}", device.context().backend());
let (send_shutdown, recv_shutdown) = std::sync::mpsc::channel::<bool>();
// This time we actually wait for you to press enter on a different thread.
let wait_thread = std::thread::spawn(move || {
wait_for_enter();
send_shutdown
.send(true)
.expect("failed to send shutdown request");
});
    // Here we just loop and rewind the decoder.
loop {
// If a rewind was requested, rewind the decoder and reset the flag.
if rewind.load(Ordering::Acquire) {
println!("rewind requested...");
decoder
.seek_to_pcm_frame(0)
.expect("error occurred while rewinding");
rewind.store(false, Ordering::Release);
}
match recv_shutdown.try_recv() {
// Received a request to shutdown.
Ok(_) => break,
// Nothing happened so this is a NOP.
Err(std::sync::mpsc::TryRecvError::Empty) => {}
// The input wait thread disconnected for some reason so we should bail.
Err(std::sync::mpsc::TryRecvError::Disconnected) => {
break;
}
}
std::thread::yield_now();
}
println!("Shutting Down...");
wait_thread.join().expect("failed to join wait thread");
}
/// Shows a prompt and waits for input on stdin.
fn wait_for_enter() {
use std::io::Write;
println!("Press ENTER/RETURN to exit...");
// Make sure the line above is displayed:
std::io::stdout().flush().expect("failed to flush stdout");
// Just read some random line off of stdin and discard it:
std::io::stdin()
.read_line(&mut String::new())
.expect("failed to wait for line");
}
| 33.265306 | 100 | 0.609509 |
03babd400a9f07d8574b328389b1decfd37d2eb1 | 7,412 | use crate::common::{jcli::JCli, jormungandr::ConfigurationBuilder, startup};
use jormungandr_lib::interfaces::{ActiveSlotCoefficient, KESUpdateSpeed};
use jormungandr_testing_utils::{
testing::{
benchmark_efficiency, benchmark_endurance, EfficiencyBenchmarkDef,
EfficiencyBenchmarkFinish, Endurance, Thresholds,
},
wallet::Wallet,
};
use std::{iter, time::Duration};
#[test]
pub fn test_100_transaction_is_processed_in_10_packs_to_many_accounts() {
let receivers: Vec<Wallet> = iter::from_fn(|| Some(startup::create_new_account_address()))
.take(10)
.collect();
    send_and_measure_100_transaction_in_10_packs_for_receivers(
receivers,
"100_transaction_are_processed_in_10_packs_to_many_accounts",
);
}
#[test]
pub fn test_100_transaction_is_processed_in_10_packs_to_single_account() {
    let single_receiver = startup::create_new_account_address();
    let receivers: Vec<Wallet> = iter::from_fn(|| Some(single_receiver.clone()))
.take(10)
.collect();
    send_and_measure_100_transaction_in_10_packs_for_receivers(
receivers,
"100_transaction_are_processed_in_10_packs_to_single_account",
);
}
fn send_and_measure_100_transaction_in_10_packs_for_receivers(receivers: Vec<Wallet>, info: &str) {
let pack_size = 10;
let target = (pack_size * receivers.len()) as u32;
    let efficiency_benchmark_result = send_100_transaction_in_10_packs_for_receivers(
pack_size,
receivers,
benchmark_efficiency(info.to_owned()).target(target),
);
efficiency_benchmark_result.print();
}
fn send_100_transaction_in_10_packs_for_receivers(
iterations_count: usize,
receivers: Vec<Wallet>,
efficiency_benchmark_def: &mut EfficiencyBenchmarkDef,
) -> EfficiencyBenchmarkFinish {
let mut sender = startup::create_new_account_address();
let jcli: JCli = Default::default();
let (jormungandr, _) = startup::start_stake_pool(
&[sender.clone()],
&[],
ConfigurationBuilder::new()
.with_slots_per_epoch(60)
.with_consensus_genesis_praos_active_slot_coeff(ActiveSlotCoefficient::MAXIMUM)
.with_slot_duration(2)
.with_kes_update_speed(KESUpdateSpeed::new(43200).unwrap()),
)
.unwrap();
    let output_value = 1u64;
let mut efficiency_benchmark_run = efficiency_benchmark_def.start();
for i in 0..iterations_count {
        let transaction_messages: Vec<String> = receivers
.iter()
.map(|receiver| {
let message = jcli
.transaction_builder(jormungandr.genesis_block_hash())
.new_transaction()
.add_account(&sender.address().to_string(), &output_value.into())
.add_output(&receiver.address().to_string(), output_value.into())
.finalize()
.seal_with_witness_for_address(&sender)
.to_message();
sender.confirm_transaction();
message
})
.collect();
println!("Sending pack of 10 transaction no. {}", i);
if let Err(err) = super::send_transaction_and_ensure_block_was_produced(
&transation_messages,
&jormungandr,
) {
return efficiency_benchmark_run.exception(err.to_string());
}
efficiency_benchmark_run.increment_by(receivers.len() as u32);
}
efficiency_benchmark_run.stop()
}
#[test]
pub fn test_100_transaction_is_processed_simple() {
let transaction_max_count = 100;
let mut sender = startup::create_new_account_address();
let receiver = startup::create_new_account_address();
let jcli: JCli = Default::default();
let (jormungandr, _) = startup::start_stake_pool(
&[sender.clone()],
&[],
ConfigurationBuilder::new()
.with_slots_per_epoch(60)
.with_consensus_genesis_praos_active_slot_coeff(ActiveSlotCoefficient::MAXIMUM)
.with_slot_duration(4)
.with_kes_update_speed(KESUpdateSpeed::new(43200).unwrap()),
)
.unwrap();
    let output_value = 1u64;
let mut benchmark = benchmark_efficiency("test_100_transaction_is_processed_simple")
.target(transaction_max_count)
.start();
for i in 0..transaction_max_count {
let transaction = jcli
.transaction_builder(jormungandr.genesis_block_hash())
.new_transaction()
.add_account(&sender.address().to_string(), &output_value.into())
.add_output(&receiver.address().to_string(), output_value.into())
.finalize()
.seal_with_witness_for_address(&sender)
.to_message();
sender.confirm_transaction();
println!("Sending transaction no. {}", i + 1);
if let Err(error) = super::check_transaction_was_processed(
transaction.to_owned(),
&receiver,
(i + 1).into(),
&jormungandr,
) {
let message = format!("{}", error);
benchmark.exception(message).print();
return;
}
benchmark.increment();
}
benchmark.stop().print();
jcli.fragments_checker(&jormungandr)
.check_log_shows_in_block()
.expect("cannot read logs");
}
#[test]
pub fn test_blocks_are_being_created_for_more_than_15_minutes() {
let mut sender = startup::create_new_account_address();
let mut receiver = startup::create_new_account_address();
let jcli: JCli = Default::default();
let (jormungandr, _) = startup::start_stake_pool(
&[sender.clone()],
&[],
ConfigurationBuilder::new()
.with_slots_per_epoch(60)
.with_consensus_genesis_praos_active_slot_coeff(ActiveSlotCoefficient::MAXIMUM)
.with_slot_duration(4)
.with_epoch_stability_depth(10)
.with_kes_update_speed(KESUpdateSpeed::new(43200).unwrap()),
)
.unwrap();
    let output_value = 1u64;
let benchmark = benchmark_endurance("test_blocks_are_created_for_more_than_15_minutes")
.target(Duration::from_secs(900))
.start();
loop {
let transaction = jcli
.transaction_builder(jormungandr.genesis_block_hash())
.new_transaction()
.add_account(&sender.address().to_string(), &output_value.into())
.add_output(&receiver.address().to_string(), output_value.into())
.finalize()
.seal_with_witness_for_address(&sender)
.to_message();
sender.confirm_transaction();
if let Err(err) =
super::send_transaction_and_ensure_block_was_produced(&[transaction], &jormungandr)
{
let error_message = format!("{:?}", err);
            // temporary threshold until the issue with stuck transactions is resolved
let temporary_threshold =
Thresholds::<Endurance>::new_endurance(Duration::from_secs(400));
benchmark
.exception(error_message)
.print_with_thresholds(temporary_threshold);
return;
}
if benchmark.max_endurance_reached() {
benchmark.stop().print();
return;
}
std::mem::swap(&mut sender, &mut receiver);
}
}
| 35.806763 | 99 | 0.640178 |
e8b696abddb09f5c90c122ab1c0e9d4504443cef | 1,695 | /// Internal namespace.
pub( crate ) mod private
{
// use crate::prelude::*;
use core::fmt;
use core::hash::Hash;
use core::cmp::{ PartialEq, Eq };
///
  /// Interface to identify an instance of something, for example a node.
///
pub trait IdentityInterface
where
Self :
'static +
Copy +
Hash +
fmt::Debug +
PartialEq +
      Eq +
{
}
impl< T > IdentityInterface for T
where
T :
'static +
Copy +
Hash +
fmt::Debug +
PartialEq +
      Eq
,
{
}
///
  /// Interface to identify an instance of something with the ability to increase it to generate a new one.
///
pub trait IdentityGenerableInterface
where
Self : IdentityInterface + Default,
{
    /// Generate a new identity by increasing the current one.
fn next( &self ) -> Self;
/// Generate the first identity.
fn first() -> Self
{
Default::default()
}
    /// Check whether the identity is valid.
fn is_valid( &self ) -> bool;
}
///
/// Instance has an id.
///
pub trait HasId
{
/// Id of the node.
type Id : IdentityInterface;
/// Get id.
fn id( &self ) -> Self::Id;
}
}
/// Protected namespace of the module.
pub mod protected
{
pub use super::orphan::*;
}
pub use protected::*;
/// Parented namespace of the module.
pub mod orphan
{
pub use super::exposed::*;
}
/// Exposed namespace of the module.
pub mod exposed
{
pub use super::prelude::*;
}
/// Prelude to use essentials: `use my_module::prelude::*`.
pub mod prelude
{
pub use super::private::
{
IdentityInterface,
IdentityGenerableInterface,
HasId,
};
}
| 16.456311 | 102 | 0.579351 |
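// --- Added usage sketch (not part of the original module above) ---
// Thanks to the blanket impl, any `'static + Copy + Hash + Debug + PartialEq
// + Eq` type already satisfies `IdentityInterface`, so a node type only needs
// to implement `HasId`. `NodeId` and `Node` below are hypothetical.
//
// #[derive(Copy, Clone, Hash, Debug, PartialEq, Eq)]
// struct NodeId(u64);
//
// struct Node { id: NodeId }
//
// impl HasId for Node {
//     type Id = NodeId;
//     fn id(&self) -> NodeId { self.id }
// }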
ab0fb353d897fdd1da8c8dd1969b61d1d25ad923 | 22,670 | // Copyright 2018-2020 Cargill Incorporated
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A database-backed registry, powered by [`Diesel`](https://crates.io/crates/diesel).
//!
//! This module contains the [`DieselRegistry`], which provides an implementation of the
//! [`RwRegistry`] trait.
//!
//! [`DieselRegistry`]: ../struct.DieselRegistry.html
//! [`RwRegistry`]: ../trait.RwRegistry.html
mod models;
mod operations;
mod schema;
use diesel::r2d2::{ConnectionManager, Pool};
use super::{
MetadataPredicate, Node, NodeIter, RegistryError, RegistryReader, RegistryWriter, RwRegistry,
};
use operations::count_nodes::RegistryCountNodesOperation as _;
use operations::delete_node::RegistryDeleteNodeOperation as _;
use operations::fetch_node::RegistryFetchNodeOperation as _;
use operations::has_node::RegistryHasNodeOperation as _;
use operations::insert_node::RegistryInsertNodeOperation as _;
use operations::list_nodes::RegistryListNodesOperation as _;
use operations::RegistryOperations;
/// A database-backed registry, powered by [`Diesel`](https://crates.io/crates/diesel).
pub struct DieselRegistry<C: diesel::Connection + 'static> {
connection_pool: Pool<ConnectionManager<C>>,
}
impl<C: diesel::Connection> DieselRegistry<C> {
/// Creates a new `DieselRegistry`.
///
/// # Arguments
///
/// * `connection_pool`: connection pool for the database
pub fn new(connection_pool: Pool<ConnectionManager<C>>) -> Self {
DieselRegistry { connection_pool }
}
}
#[cfg(feature = "postgres")]
impl Clone for DieselRegistry<diesel::pg::PgConnection> {
fn clone(&self) -> Self {
Self {
connection_pool: self.connection_pool.clone(),
}
}
}
#[cfg(feature = "sqlite")]
impl Clone for DieselRegistry<diesel::sqlite::SqliteConnection> {
fn clone(&self) -> Self {
Self {
connection_pool: self.connection_pool.clone(),
}
}
}
impl<C> RegistryReader for DieselRegistry<C>
where
C: diesel::Connection,
i64: diesel::deserialize::FromSql<diesel::sql_types::BigInt, C::Backend>,
String: diesel::deserialize::FromSql<diesel::sql_types::Text, C::Backend>,
{
fn list_nodes<'a, 'b: 'a>(
&'b self,
predicates: &'a [MetadataPredicate],
) -> Result<NodeIter<'a>, RegistryError> {
RegistryOperations::new(&*self.connection_pool.get()?)
.list_nodes(predicates)
.map(|nodes| Box::new(nodes.into_iter()) as NodeIter<'a>)
}
fn count_nodes(&self, predicates: &[MetadataPredicate]) -> Result<u32, RegistryError> {
RegistryOperations::new(&*self.connection_pool.get()?).count_nodes(predicates)
}
fn fetch_node(&self, identity: &str) -> Result<Option<Node>, RegistryError> {
RegistryOperations::new(&*self.connection_pool.get()?).fetch_node(identity)
}
fn has_node(&self, identity: &str) -> Result<bool, RegistryError> {
RegistryOperations::new(&*self.connection_pool.get()?).has_node(identity)
}
}
#[cfg(feature = "postgres")]
impl RegistryWriter for DieselRegistry<diesel::pg::PgConnection> {
fn insert_node(&self, node: Node) -> Result<(), RegistryError> {
RegistryOperations::new(&*self.connection_pool.get()?).insert_node(node)
}
fn delete_node(&self, identity: &str) -> Result<Option<Node>, RegistryError> {
RegistryOperations::new(&*self.connection_pool.get()?).delete_node(identity)
}
}
#[cfg(feature = "sqlite")]
impl RegistryWriter for DieselRegistry<diesel::sqlite::SqliteConnection> {
fn insert_node(&self, node: Node) -> Result<(), RegistryError> {
RegistryOperations::new(&*self.connection_pool.get()?).insert_node(node)
}
fn delete_node(&self, identity: &str) -> Result<Option<Node>, RegistryError> {
RegistryOperations::new(&*self.connection_pool.get()?).delete_node(identity)
}
}
#[cfg(feature = "postgres")]
impl RwRegistry for DieselRegistry<diesel::pg::PgConnection>
where
String: diesel::deserialize::FromSql<diesel::sql_types::Text, diesel::pg::Pg>,
{
fn clone_box(&self) -> Box<dyn RwRegistry> {
Box::new(self.clone())
}
fn clone_box_as_reader(&self) -> Box<dyn RegistryReader> {
Box::new(self.clone())
}
fn clone_box_as_writer(&self) -> Box<dyn RegistryWriter> {
Box::new(self.clone())
}
}
#[cfg(feature = "sqlite")]
impl RwRegistry for DieselRegistry<diesel::sqlite::SqliteConnection>
where
String: diesel::deserialize::FromSql<diesel::sql_types::Text, diesel::sqlite::Sqlite>,
{
fn clone_box(&self) -> Box<dyn RwRegistry> {
Box::new(self.clone())
}
fn clone_box_as_reader(&self) -> Box<dyn RegistryReader> {
Box::new(self.clone())
}
fn clone_box_as_writer(&self) -> Box<dyn RegistryWriter> {
Box::new(self.clone())
}
}
#[cfg(all(test, feature = "sqlite"))]
pub mod tests {
use super::*;
use crate::migrations::run_sqlite_migrations;
use diesel::{
r2d2::{ConnectionManager, Pool},
sqlite::SqliteConnection,
};
/// Test that a new node can be inserted into the registry and fetched
///
/// 1. Setup sqlite database
/// 2. Insert node 1
/// 3. Validate that the node can be fetched correctly from state
/// 4. Try to insert the node again with same endpoints, should fail
#[test]
fn test_insert_nodes() {
let pool = create_connection_pool_and_migrate();
let registry = DieselRegistry::new(pool);
registry
.insert_node(get_node_1())
.expect("Unable to insert node");
let node = registry
.fetch_node(&get_node_1().identity)
.expect("Failed to fetch node")
.expect("Node not found");
assert_eq!(node, get_node_1());
if registry.insert_node(get_node_1()).is_ok() {
panic!("Should have returned an error because of duplicate endpoint")
}
}
    /// Test that fetching a node that does not exist returns None
    ///
    /// 1. Setup sqlite database
    /// 2. Insert node 1 and 2
    /// 3. Try to fetch a node that does not exist
#[test]
fn test_fetch_node_not_found() {
let pool = create_connection_pool_and_migrate();
let registry = DieselRegistry::new(pool);
registry
.insert_node(get_node_1())
.expect("Unable to insert node");
registry
.insert_node(get_node_2())
.expect("Unable to insert node");
assert_eq!(
registry
.fetch_node("DoesNotExist")
.expect("Failed to fetch node"),
None
)
}
/// Verifies that `has_node` properly determines if a node exists in the registry.
///
/// 1. Setup sqlite database
/// 2. Insert node 1
/// 3. Validate that the registry has node 1 but not node 2
#[test]
fn test_has_node() {
let pool = create_connection_pool_and_migrate();
let registry = DieselRegistry::new(pool);
registry
.insert_node(get_node_1())
.expect("Unable to insert node");
assert!(registry
.has_node(&get_node_1().identity)
.expect("Failed to check if node1 exists"));
assert!(!registry
.has_node(&get_node_2().identity)
.expect("Failed to check if node2 exists"));
}
/// Verifies that list_nodes returns a list of nodes.
///
/// 1. Setup sqlite database
/// 2. Insert node 1 and 2
/// 3. Validate that the registry returns both nodes in the list
#[test]
fn test_list_nodes_ok() {
let pool = create_connection_pool_and_migrate();
let registry = DieselRegistry::new(pool);
registry
.insert_node(get_node_1())
.expect("Unable to insert node");
registry
.insert_node(get_node_2())
.expect("Unable to insert node");
let nodes = registry
.list_nodes(&[])
.expect("Failed to retrieve nodes")
.collect::<Vec<_>>();
assert_eq!(nodes.len(), 2);
assert_eq!(nodes[0], get_node_1());
assert_eq!(nodes[1], get_node_2());
}
/// Verifies that list_nodes returns an empty list when there are no nodes in the registry.
///
/// 1. Setup sqlite database
/// 2. Validate that the registry returns an empty list
#[test]
fn test_list_nodes_empty_ok() {
let pool = create_connection_pool_and_migrate();
let registry = DieselRegistry::new(pool);
let nodes = registry
.list_nodes(&[])
.expect("Failed to retrieve nodes")
.collect::<Vec<_>>();
assert_eq!(nodes.len(), 0);
}
/// Verifies that list_nodes returns the correct items when it is filtered by metadata.
///
/// 1. Setup sqlite database
/// 2. Insert node 1 and 2
/// 3. Validate that the registry returns only node 2 when filtered by company
#[test]
fn test_list_nodes_filter_metadata_ok() {
let pool = create_connection_pool_and_migrate();
let registry = DieselRegistry::new(pool);
registry
.insert_node(get_node_1())
.expect("Unable to insert node");
registry
.insert_node(get_node_2())
.expect("Unable to insert node");
let filter = vec![MetadataPredicate::Eq(
"company".into(),
get_node_2().metadata.get("company").unwrap().to_string(),
)];
let nodes = registry
.list_nodes(&filter)
.expect("Failed to retrieve nodes")
.collect::<Vec<_>>();
assert_eq!(nodes.len(), 1);
assert_eq!(nodes[0], get_node_2());
}
/// Verifies that list_nodes returns the correct items when it is filtered by multiple
/// metadata fields.
///
/// 1. Setup sqlite database
/// 2. Insert node 1, 2 and 3
/// 3. Validate that the registry returns only node 3 when filtered by company and admin
#[test]
    fn test_list_nodes_filter_metadata_multiple() {
let pool = create_connection_pool_and_migrate();
let registry = DieselRegistry::new(pool);
registry
.insert_node(get_node_1())
.expect("Unable to insert node");
registry
.insert_node(get_node_2())
.expect("Unable to insert node");
registry
.insert_node(get_node_3())
.expect("Unable to insert node");
let filter = vec![
MetadataPredicate::Eq(
"company".to_string(),
get_node_3().metadata.get("company").unwrap().to_string(),
),
MetadataPredicate::Eq(
"admin".to_string(),
get_node_3().metadata.get("admin").unwrap().to_string(),
),
];
let nodes = registry
.list_nodes(&filter)
.expect("Failed to retrieve nodes")
.collect::<Vec<_>>();
assert_eq!(nodes.len(), 1);
assert_eq!(nodes[0], get_node_3());
}
    /// Verifies that list_nodes returns an empty list when no nodes fit the filtering criteria.
    ///
    /// 1. Setup sqlite database
    /// 2. Insert node 1 and 2
/// 3. Validate that the registry returns an empty list
#[test]
fn test_list_nodes_filter_empty_ok() {
let pool = create_connection_pool_and_migrate();
let registry = DieselRegistry::new(pool);
registry
.insert_node(get_node_1())
.expect("Unable to insert node");
registry
.insert_node(get_node_2())
.expect("Unable to insert node");
let filter = vec![MetadataPredicate::Eq(
"admin".to_string(),
get_node_3().metadata.get("admin").unwrap().to_string(),
)];
let nodes = registry
.list_nodes(&filter)
.expect("Failed to retrieve nodes")
.collect::<Vec<_>>();
assert_eq!(nodes.len(), 0);
}
/// Verifies that list_nodes returns the correct items when it is filtered by metadata.
///
/// 1. Setup sqlite database
/// 2. Insert node 1 and 2
/// 3. Validate that the registry returns only node 1 when filtered by company
#[test]
fn test_list_nodes_filter_metadata_not_equal() {
let pool = create_connection_pool_and_migrate();
let registry = DieselRegistry::new(pool);
registry
.insert_node(get_node_1())
.expect("Unable to insert node");
registry
.insert_node(get_node_2())
.expect("Unable to insert node");
let filter = vec![MetadataPredicate::Ne(
"company".into(),
get_node_2().metadata.get("company").unwrap().to_string(),
)];
let nodes = registry
.list_nodes(&filter)
.expect("Failed to retrieve nodes")
.collect::<Vec<_>>();
assert_eq!(nodes.len(), 1);
assert_eq!(nodes[0], get_node_1());
}
/// Verifies that list_nodes returns the correct items when it is filtered by metadata.
///
/// 1. Setup sqlite database
/// 2. Insert node 1 and 2
/// 3. Validate that the registry returns only node 2 when filtered by gt admin Bob
#[test]
fn test_list_nodes_filter_metadata_gt() {
let pool = create_connection_pool_and_migrate();
let registry = DieselRegistry::new(pool);
registry
.insert_node(get_node_1())
.expect("Unable to insert node");
registry
.insert_node(get_node_2())
.expect("Unable to insert node");
let filter = vec![MetadataPredicate::Gt(
"admin".into(),
get_node_1().metadata.get("admin").unwrap().to_string(),
)];
let nodes = registry
.list_nodes(&filter)
.expect("Failed to retrieve nodes")
.collect::<Vec<_>>();
assert_eq!(nodes.len(), 1);
assert_eq!(nodes[0], get_node_2());
}
/// Verifies that list_nodes returns the correct items when it is filtered by metadata.
///
/// 1. Setup sqlite database
/// 2. Insert node 1, 2, and 3
/// 3. Validate that the registry returns node 2 and 3 when filtered by ge admin Carol
#[test]
fn test_list_nodes_filter_metadata_ge() {
let pool = create_connection_pool_and_migrate();
let registry = DieselRegistry::new(pool);
registry
.insert_node(get_node_1())
.expect("Unable to insert node");
registry
.insert_node(get_node_2())
.expect("Unable to insert node");
registry
.insert_node(get_node_3())
.expect("Unable to insert node");
let filter = vec![MetadataPredicate::Ge(
"admin".into(),
get_node_2().metadata.get("admin").unwrap().to_string(),
)];
let nodes = registry
.list_nodes(&filter)
.expect("Failed to retrieve nodes")
.collect::<Vec<_>>();
assert_eq!(nodes.len(), 2);
assert_eq!(nodes, [get_node_2(), get_node_3()]);
}
/// Verifies that list_nodes returns the correct items when it is filtered by metadata.
///
/// 1. Setup sqlite database
/// 2. Insert node 1 and 2
/// 3. Validate that the registry returns only node 1 when filtered by lt admin Carol
#[test]
fn test_list_nodes_filter_metadata_lt() {
let pool = create_connection_pool_and_migrate();
let registry = DieselRegistry::new(pool);
registry
.insert_node(get_node_1())
.expect("Unable to insert node");
registry
.insert_node(get_node_2())
.expect("Unable to insert node");
let filter = vec![MetadataPredicate::Lt(
"admin".into(),
get_node_2().metadata.get("admin").unwrap().to_string(),
)];
let nodes = registry
.list_nodes(&filter)
.expect("Failed to retrieve nodes")
.collect::<Vec<_>>();
assert_eq!(nodes.len(), 1);
assert_eq!(nodes[0], get_node_1());
}
/// Verifies that list_nodes returns the correct items when it is filtered by metadata.
///
/// 1. Setup sqlite database
/// 2. Insert node 1, 2, and 3
/// 3. Validate that the registry returns node 1 and 2 when filtered by le admin Carol
#[test]
fn test_list_nodes_filter_metadata_le() {
let pool = create_connection_pool_and_migrate();
let registry = DieselRegistry::new(pool);
registry
.insert_node(get_node_1())
.expect("Unable to insert node");
registry
.insert_node(get_node_2())
.expect("Unable to insert node");
registry
.insert_node(get_node_3())
.expect("Unable to insert node");
let filter = vec![MetadataPredicate::Le(
"admin".into(),
get_node_2().metadata.get("admin").unwrap().to_string(),
)];
let nodes = registry
.list_nodes(&filter)
.expect("Failed to retrieve nodes")
.collect::<Vec<_>>();
assert_eq!(nodes.len(), 2);
assert_eq!(nodes, [get_node_1(), get_node_2()]);
}
/// Verifies that delete_nodes removes the required node
///
/// 1. Setup sqlite database
/// 2. Insert node 1, 2, and 3
/// 3. Delete node 2
/// 4. Verify that only node 1 and 3 are returned from list
#[test]
fn test_delete_node() {
let pool = create_connection_pool_and_migrate();
let registry = DieselRegistry::new(pool);
registry
.insert_node(get_node_1())
.expect("Unable to insert node");
registry
.insert_node(get_node_2())
.expect("Unable to insert node");
registry
.insert_node(get_node_3())
.expect("Unable to insert node");
registry
.delete_node("Node-456")
.expect("Unable to delete node");
let nodes = registry
.list_nodes(&[])
.expect("Failed to retrieve nodes")
.collect::<Vec<_>>();
assert_eq!(nodes.len(), 2);
assert_eq!(nodes, [get_node_1(), get_node_3()]);
}
/// Verifies that count_nodes returns the correct number of nodes
///
/// 1. Setup sqlite database
/// 2. Insert node 1, 2, and 3
    /// 3. Verify that the registry count_nodes returns 3
#[test]
fn test_count_node() {
let pool = create_connection_pool_and_migrate();
let registry = DieselRegistry::new(pool);
registry
.insert_node(get_node_1())
.expect("Unable to insert node");
registry
.insert_node(get_node_2())
.expect("Unable to insert node");
registry
.insert_node(get_node_3())
.expect("Unable to insert node");
let count = registry.count_nodes(&[]).expect("Failed to retrieve nodes");
assert_eq!(count, 3);
}
/// Verifies that count_nodes returns the correct number of nodes when filtered with metadata
///
/// 1. Setup sqlite database
/// 2. Insert node 1, 2, and 3
    /// 3. Verify that the registry count_nodes returns 2 when filtered by company Cargill
#[test]
fn test_count_node_metadata() {
let pool = create_connection_pool_and_migrate();
let registry = DieselRegistry::new(pool);
registry
.insert_node(get_node_1())
.expect("Unable to insert node");
registry
.insert_node(get_node_2())
.expect("Unable to insert node");
registry
.insert_node(get_node_3())
.expect("Unable to insert node");
let filter = vec![MetadataPredicate::Eq(
"company".into(),
get_node_2().metadata.get("company").unwrap().to_string(),
)];
let count = registry
.count_nodes(&filter)
.expect("Failed to retrieve nodes");
assert_eq!(count, 2);
}
fn get_node_1() -> Node {
Node::builder("Node-123")
.with_endpoint("tcps://12.0.0.123:8431")
.with_display_name("Bitwise IO - Node 1")
.with_key("abcd")
.with_metadata("company", "Bitwise IO")
.with_metadata("admin", "Bob")
.build()
.expect("Failed to build node1")
}
fn get_node_2() -> Node {
Node::builder("Node-456")
.with_endpoint("tcps://12.0.0.123:8434")
.with_display_name("Cargill - Node 1")
.with_key("0123")
.with_metadata("company", "Cargill")
.with_metadata("admin", "Carol")
.build()
.expect("Failed to build node2")
}
fn get_node_3() -> Node {
Node::builder("Node-789")
.with_endpoint("tcps://12.0.0.123:8435")
.with_display_name("Cargill - Node 2")
.with_key("4567")
.with_metadata("company", "Cargill")
.with_metadata("admin", "Charlie")
.build()
.expect("Failed to build node3")
}
/// Creates a connection pool for an in-memory SQLite database with only a single connection
/// available. Each connection is backed by a different in-memory SQLite database, so limiting
/// the pool to a single connection ensures that the same DB is used for all operations.
fn create_connection_pool_and_migrate() -> Pool<ConnectionManager<SqliteConnection>> {
let connection_manager = ConnectionManager::<SqliteConnection>::new(":memory:");
let pool = Pool::builder()
.max_size(1)
.build(connection_manager)
.expect("Failed to build connection pool");
run_sqlite_migrations(&*pool.get().expect("Failed to get connection for migrations"))
.expect("Failed to run migrations");
pool
}
}
| 32.760116 | 98 | 0.598191 |
fec294cd35b776f8ddf91d3c01a5641e28c8349b | 1,605 | use crate::*;
use std_::fmt::Debug;
mod with_super_traits{
use super::*;
structural_alias!{
trait Trait:Copy{
a:u8,
}
}
trait AssertImplies:Trait{}
impl<This> AssertImplies for This
where
This:Copy+IntoFieldMut<FP!(a),Ty=u8>
{}
/// This function ensures that the supertraits and field accessors in Trait
/// are implied by `T:Trait`.
#[allow(dead_code)]
fn func<T:Trait>(v:T){
let _copy=v;
let _:&u8=v.field_(fp!(a));
}
}
/////////////////////////////////////////////
mod with_where_clause{
use super::*;
structural_alias!{
trait WithWhereClause<T:Clone>:Copy
where
T:Debug
{
a:T,
}
}
trait AssertImplies<T>:WithWhereClause<T>
where
T:Clone+Debug
{}
impl<This,T> AssertImplies<T> for This
where
T:Clone+Debug,
This:Copy+IntoFieldMut<FP!(a),Ty=T>
{}
}
/////////////////////////////////////////////
mod all_access{
use super::*;
structural_alias!{
trait Foo<T>{
a:u32,
ref b:T,
mut c:i64,
move d:&'static str,
}
}
trait Dummy{
fn well<This,T>()
where
This:Foo<T>;
}
impl Dummy for () {
fn well<This,T>()
where
This:
GetField<FP!(a), Ty=u32>+
GetField<FP!(b), Ty=T>+
GetFieldMut<FP!(c), Ty=i64>+
IntoField<FP!(d), Ty=&'static str>,
{}
}
}
| 16.546392 | 79 | 0.450467 |
4856fa3fc45acae3881a226d1d1b97fbb8d74bb2 | 73,339 | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Name resolution for lifetimes.
//!
//! Name resolution for lifetimes follows MUCH simpler rules than the
//! full resolve. For example, lifetime names are never exported or
//! used between functions, and they operate in a purely top-down
//! way. Therefore we break lifetime name resolution into a separate pass.
use hir::map::Map;
use hir::def::Def;
use hir::def_id::DefId;
use middle::cstore::CrateStore;
use session::Session;
use ty;
use std::cell::Cell;
use std::mem::replace;
use syntax::ast;
use syntax::attr;
use syntax::ptr::P;
use syntax_pos::Span;
use errors::DiagnosticBuilder;
use util::common::ErrorReported;
use util::nodemap::{NodeMap, NodeSet, FxHashSet, FxHashMap, DefIdMap};
use rustc_back::slice;
use hir;
use hir::intravisit::{self, Visitor, NestedVisitorMap};
/// The origin of a named lifetime definition.
///
/// This is used to prevent the usage of in-band lifetimes in `Fn`/`fn` syntax.
#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug)]
pub enum LifetimeDefOrigin {
// Explicit binders like `fn foo<'a>(x: &'a u8)`
Explicit,
// In-band declarations like `fn foo(x: &'a u8)`
InBand,
}
impl LifetimeDefOrigin {
fn from_is_in_band(is_in_band: bool) -> Self {
if is_in_band {
LifetimeDefOrigin::InBand
} else {
LifetimeDefOrigin::Explicit
}
}
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug)]
pub enum Region {
Static,
EarlyBound(/* index */ u32, /* lifetime decl */ DefId, LifetimeDefOrigin),
LateBound(ty::DebruijnIndex, /* lifetime decl */ DefId, LifetimeDefOrigin),
LateBoundAnon(ty::DebruijnIndex, /* anon index */ u32),
Free(DefId, /* lifetime decl */ DefId),
}
impl Region {
fn early(hir_map: &Map, index: &mut u32, def: &hir::LifetimeDef)
-> (hir::LifetimeName, Region)
{
let i = *index;
*index += 1;
let def_id = hir_map.local_def_id(def.lifetime.id);
let origin = LifetimeDefOrigin::from_is_in_band(def.in_band);
debug!("Region::early: index={} def_id={:?}", i, def_id);
(def.lifetime.name, Region::EarlyBound(i, def_id, origin))
}
fn late(hir_map: &Map, def: &hir::LifetimeDef) -> (hir::LifetimeName, Region) {
let depth = ty::DebruijnIndex::new(1);
let def_id = hir_map.local_def_id(def.lifetime.id);
let origin = LifetimeDefOrigin::from_is_in_band(def.in_band);
(def.lifetime.name, Region::LateBound(depth, def_id, origin))
}
fn late_anon(index: &Cell<u32>) -> Region {
let i = index.get();
index.set(i + 1);
let depth = ty::DebruijnIndex::new(1);
Region::LateBoundAnon(depth, i)
}
fn id(&self) -> Option<DefId> {
match *self {
Region::Static |
Region::LateBoundAnon(..) => None,
Region::EarlyBound(_, id, _) |
Region::LateBound(_, id, _) |
Region::Free(_, id) => Some(id)
}
}
fn shifted(self, amount: u32) -> Region {
match self {
Region::LateBound(depth, id, origin) => {
Region::LateBound(depth.shifted(amount), id, origin)
}
Region::LateBoundAnon(depth, index) => {
Region::LateBoundAnon(depth.shifted(amount), index)
}
_ => self
}
}
fn from_depth(self, depth: u32) -> Region {
match self {
Region::LateBound(debruijn, id, origin) => {
Region::LateBound(ty::DebruijnIndex {
depth: debruijn.depth - (depth - 1)
}, id, origin)
}
Region::LateBoundAnon(debruijn, index) => {
Region::LateBoundAnon(ty::DebruijnIndex {
depth: debruijn.depth - (depth - 1)
}, index)
}
_ => self
}
}
fn subst(self, params: &[hir::Lifetime], map: &NamedRegionMap)
-> Option<Region> {
if let Region::EarlyBound(index, _, _) = self {
params.get(index as usize).and_then(|lifetime| {
map.defs.get(&lifetime.id).cloned()
})
} else {
Some(self)
}
}
}
/// A set containing, at most, one known element.
/// If two distinct values are inserted into a set, then it
/// becomes `Many`, which can be used to detect ambiguities.
#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Debug)]
pub enum Set1<T> {
Empty,
One(T),
Many
}
impl<T: PartialEq> Set1<T> {
pub fn insert(&mut self, value: T) {
if let Set1::Empty = *self {
*self = Set1::One(value);
return;
}
if let Set1::One(ref old) = *self {
if *old == value {
return;
}
}
*self = Set1::Many;
}
}
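// A minimal illustrative test (an added sketch, not part of the upstream
// file) showing how `Set1` collapses two distinct insertions into `Many`:
#[cfg(test)]
mod set1_tests {
    use super::Set1;

    #[test]
    fn distinct_inserts_become_many() {
        let mut s = Set1::Empty;
        s.insert(1);
        assert_eq!(s, Set1::One(1)); // first value is remembered
        s.insert(1);
        assert_eq!(s, Set1::One(1)); // re-inserting the same value is a no-op
        s.insert(2);
        assert_eq!(s, Set1::Many); // a second distinct value marks ambiguity
    }
}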
pub type ObjectLifetimeDefault = Set1<Region>;
// Maps the id of each lifetime reference to the lifetime decl
// that it corresponds to.
pub struct NamedRegionMap {
// maps from every use of a named (not anonymous) lifetime to a
// `Region` describing how that region is bound
pub defs: NodeMap<Region>,
// the set of lifetime def ids that are late-bound; a region can
// be late-bound if (a) it does NOT appear in a where-clause and
// (b) it DOES appear in the arguments.
pub late_bound: NodeSet,
// For each type and trait definition, maps type parameters
// to the trait object lifetime defaults computed from them.
pub object_lifetime_defaults: NodeMap<Vec<ObjectLifetimeDefault>>,
}
struct LifetimeContext<'a, 'tcx: 'a> {
sess: &'a Session,
cstore: &'a CrateStore,
hir_map: &'a Map<'tcx>,
map: &'a mut NamedRegionMap,
scope: ScopeRef<'a>,
// Deep breath. Our representation for poly trait refs contains a single
// binder and thus we only allow a single level of quantification. However,
// the syntax of Rust permits quantification in two places, e.g., `T: for <'a> Foo<'a>`
// and `for <'a, 'b> &'b T: Foo<'a>`. In order to get the de Bruijn indices
// correct when representing these constraints, we should only introduce one
// scope. However, we want to support both locations for the quantifier and
// during lifetime resolution we want precise information (so we can't
// desugar in an earlier phase).
// SO, if we encounter a quantifier at the outer scope, we set
// trait_ref_hack to true (and introduce a scope), and then if we encounter
// a quantifier at the inner scope, we error. If trait_ref_hack is false,
// then we introduce the scope at the inner quantifier.
// I'm sorry.
trait_ref_hack: bool,
// Used to disallow the use of in-band lifetimes in `fn` or `Fn` syntax.
is_in_fn_syntax: bool,
// List of labels in the function/method currently under analysis.
labels_in_fn: Vec<(ast::Name, Span)>,
// Cache for cross-crate per-definition object lifetime defaults.
xcrate_object_lifetime_defaults: DefIdMap<Vec<ObjectLifetimeDefault>>,
}
#[derive(Debug)]
enum Scope<'a> {
/// Declares lifetimes, and each can be early-bound or late-bound.
/// The `DebruijnIndex` of late-bound lifetimes starts at `1` and
/// it should be shifted by the number of `Binder`s in between the
/// declaration `Binder` and the location it's referenced from.
Binder {
lifetimes: FxHashMap<hir::LifetimeName, Region>,
/// if we extend this scope with another scope, what is the next index
/// we should use for an early-bound region?
next_early_index: u32,
s: ScopeRef<'a>
},
/// Lifetimes introduced by a fn are scoped to the call-site for that fn,
/// if this is a fn body, otherwise the original definitions are used.
/// Unspecified lifetimes are inferred, unless an elision scope is nested,
/// e.g. `(&T, fn(&T) -> &T);` becomes `(&'_ T, for<'a> fn(&'a T) -> &'a T)`.
Body {
id: hir::BodyId,
s: ScopeRef<'a>
},
/// A scope which either determines unspecified lifetimes or errors
/// on them (e.g. due to ambiguity). For more details, see `Elide`.
Elision {
elide: Elide,
s: ScopeRef<'a>
},
/// Use a specific lifetime (if `Some`) or leave it unset (to be
/// inferred in a function body or potentially error outside one),
/// for the default choice of lifetime in a trait object type.
ObjectLifetimeDefault {
lifetime: Option<Region>,
s: ScopeRef<'a>
},
Root
}
#[derive(Clone, Debug)]
enum Elide {
/// Use a fresh anonymous late-bound lifetime each time, by
/// incrementing the counter to generate sequential indices.
FreshLateAnon(Cell<u32>),
/// Always use this one lifetime.
Exact(Region),
/// Less or more than one lifetime were found, error on unspecified.
Error(Vec<ElisionFailureInfo>)
}
#[derive(Clone, Debug)]
struct ElisionFailureInfo {
/// Where we can find the argument pattern.
parent: Option<hir::BodyId>,
/// The index of the argument in the original definition.
index: usize,
lifetime_count: usize,
have_bound_regions: bool
}
type ScopeRef<'a> = &'a Scope<'a>;
const ROOT_SCOPE: ScopeRef<'static> = &Scope::Root;
pub fn krate(sess: &Session,
cstore: &CrateStore,
hir_map: &Map)
-> Result<NamedRegionMap, ErrorReported> {
let krate = hir_map.krate();
let mut map = NamedRegionMap {
defs: NodeMap(),
late_bound: NodeSet(),
object_lifetime_defaults: compute_object_lifetime_defaults(sess, hir_map),
};
sess.track_errors(|| {
let mut visitor = LifetimeContext {
sess,
cstore,
hir_map,
map: &mut map,
scope: ROOT_SCOPE,
trait_ref_hack: false,
is_in_fn_syntax: false,
labels_in_fn: vec![],
xcrate_object_lifetime_defaults: DefIdMap(),
};
for (_, item) in &krate.items {
visitor.visit_item(item);
}
})?;
Ok(map)
}
impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::All(self.hir_map)
}
// We want to nest trait/impl items in their parent, but nothing else.
fn visit_nested_item(&mut self, _: hir::ItemId) {}
fn visit_nested_body(&mut self, body: hir::BodyId) {
        // Each body has its own set of labels, so save the current ones.
let saved = replace(&mut self.labels_in_fn, vec![]);
let body = self.hir_map.body(body);
extract_labels(self, body);
self.with(Scope::Body { id: body.id(), s: self.scope }, |_, this| {
this.visit_body(body);
});
replace(&mut self.labels_in_fn, saved);
}
fn visit_item(&mut self, item: &'tcx hir::Item) {
match item.node {
hir::ItemFn(ref decl, _, _, _, ref generics, _) => {
self.visit_early_late(None, decl, generics, |this| {
intravisit::walk_item(this, item);
});
}
hir::ItemExternCrate(_) |
hir::ItemUse(..) |
hir::ItemMod(..) |
hir::ItemAutoImpl(..) |
hir::ItemForeignMod(..) |
hir::ItemGlobalAsm(..) => {
// These sorts of items have no lifetime parameters at all.
intravisit::walk_item(self, item);
}
hir::ItemStatic(..) |
hir::ItemConst(..) => {
// No lifetime parameters, but implied 'static.
let scope = Scope::Elision {
elide: Elide::Exact(Region::Static),
s: ROOT_SCOPE
};
self.with(scope, |_, this| intravisit::walk_item(this, item));
}
hir::ItemTy(_, ref generics) |
hir::ItemEnum(_, ref generics) |
hir::ItemStruct(_, ref generics) |
hir::ItemUnion(_, ref generics) |
hir::ItemTrait(_, _, ref generics, ..) |
hir::ItemImpl(_, _, _, ref generics, ..) => {
// These kinds of items have only early bound lifetime parameters.
let mut index = if let hir::ItemTrait(..) = item.node {
1 // Self comes before lifetimes
} else {
0
};
let lifetimes = generics.lifetimes.iter().map(|def| {
Region::early(self.hir_map, &mut index, def)
}).collect();
let next_early_index = index + generics.ty_params.len() as u32;
let scope = Scope::Binder {
lifetimes,
next_early_index,
s: ROOT_SCOPE
};
self.with(scope, |old_scope, this| {
this.check_lifetime_defs(old_scope, &generics.lifetimes);
intravisit::walk_item(this, item);
});
}
}
}
fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem) {
match item.node {
hir::ForeignItemFn(ref decl, _, ref generics) => {
self.visit_early_late(None, decl, generics, |this| {
intravisit::walk_foreign_item(this, item);
})
}
hir::ForeignItemStatic(..) => {
intravisit::walk_foreign_item(self, item);
}
hir::ForeignItemType => {
intravisit::walk_foreign_item(self, item);
}
}
}
fn visit_ty(&mut self, ty: &'tcx hir::Ty) {
debug!("visit_ty: ty={:?}", ty);
match ty.node {
hir::TyBareFn(ref c) => {
let next_early_index = self.next_early_index();
let was_in_fn_syntax = self.is_in_fn_syntax;
self.is_in_fn_syntax = true;
let scope = Scope::Binder {
lifetimes: c.lifetimes.iter().map(|def| {
Region::late(self.hir_map, def)
}).collect(),
next_early_index,
s: self.scope
};
self.with(scope, |old_scope, this| {
// a bare fn has no bounds, so everything
// contained within is scoped within its binder.
this.check_lifetime_defs(old_scope, &c.lifetimes);
intravisit::walk_ty(this, ty);
});
self.is_in_fn_syntax = was_in_fn_syntax;
}
hir::TyTraitObject(ref bounds, ref lifetime) => {
for bound in bounds {
self.visit_poly_trait_ref(bound, hir::TraitBoundModifier::None);
}
if lifetime.is_elided() {
self.resolve_object_lifetime_default(lifetime)
} else {
self.visit_lifetime(lifetime);
}
}
hir::TyRptr(ref lifetime_ref, ref mt) => {
self.visit_lifetime(lifetime_ref);
let scope = Scope::ObjectLifetimeDefault {
lifetime: self.map.defs.get(&lifetime_ref.id).cloned(),
s: self.scope
};
self.with(scope, |_, this| this.visit_ty(&mt.ty));
}
hir::TyImplTraitExistential(ref exist_ty, ref lifetimes) => {
// Resolve the lifetimes that are applied to the existential type.
// These are resolved in the current scope.
// `fn foo<'a>() -> impl MyTrait<'a> { ... }` desugars to
// `fn foo<'a>() -> MyAnonTy<'a> { ... }`
// ^ ^this gets resolved in the current scope
for lifetime in lifetimes {
self.visit_lifetime(lifetime);
// Check for predicates like `impl for<'a> SomeTrait<impl OtherTrait<'a>>`
// and ban them. Type variables instantiated inside binders aren't
// well-supported at the moment, so this doesn't work.
// In the future, this should be fixed and this error should be removed.
let def = self.map.defs.get(&lifetime.id);
if let Some(&Region::LateBound(_, def_id, _)) = def {
if let Some(node_id) = self.hir_map.as_local_node_id(def_id) {
// Ensure that the parent of the def is an item, not HRTB
let parent_id = self.hir_map.get_parent_node(node_id);
let parent_impl_id = hir::ImplItemId { node_id: parent_id };
let parent_trait_id = hir::TraitItemId { node_id: parent_id };
let krate = self.hir_map.forest.krate();
if !(krate.items.contains_key(&parent_id) ||
krate.impl_items.contains_key(&parent_impl_id) ||
krate.trait_items.contains_key(&parent_trait_id))
{
span_err!(self.sess, lifetime.span, E0657,
"`impl Trait` can only capture lifetimes \
bound at the fn or impl level");
}
}
}
}
// Resolve the lifetimes in the bounds to the lifetime defs in the generics.
// `fn foo<'a>() -> impl MyTrait<'a> { ... }` desugars to
// `abstract type MyAnonTy<'b>: MyTrait<'b>;`
// ^ ^ this gets resolved in the scope of
// the exist_ty generics
let hir::ExistTy { ref generics, ref bounds } = *exist_ty;
let mut index = self.next_early_index();
debug!("visit_ty: index = {}", index);
let lifetimes = generics.lifetimes.iter()
.map(|lt_def| Region::early(self.hir_map, &mut index, lt_def))
.collect();
let next_early_index = index + generics.ty_params.len() as u32;
let scope = Scope::Binder { lifetimes, next_early_index, s: self.scope };
self.with(scope, |_old_scope, this| {
this.visit_generics(generics);
for bound in bounds {
this.visit_ty_param_bound(bound);
}
});
}
_ => {
intravisit::walk_ty(self, ty)
}
}
}
fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) {
if let hir::TraitItemKind::Method(ref sig, _) = trait_item.node {
self.visit_early_late(
Some(self.hir_map.get_parent(trait_item.id)),
&sig.decl, &trait_item.generics,
|this| intravisit::walk_trait_item(this, trait_item))
} else {
intravisit::walk_trait_item(self, trait_item);
}
}
fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) {
if let hir::ImplItemKind::Method(ref sig, _) = impl_item.node {
self.visit_early_late(
Some(self.hir_map.get_parent(impl_item.id)),
&sig.decl, &impl_item.generics,
|this| intravisit::walk_impl_item(this, impl_item))
} else {
intravisit::walk_impl_item(self, impl_item);
}
}
fn visit_lifetime(&mut self, lifetime_ref: &'tcx hir::Lifetime) {
if lifetime_ref.is_elided() {
self.resolve_elided_lifetimes(slice::ref_slice(lifetime_ref));
return;
}
if lifetime_ref.is_static() {
self.insert_lifetime(lifetime_ref, Region::Static);
return;
}
self.resolve_lifetime_ref(lifetime_ref);
}
fn visit_path(&mut self, path: &'tcx hir::Path, _: ast::NodeId) {
for (i, segment) in path.segments.iter().enumerate() {
let depth = path.segments.len() - i - 1;
if let Some(ref parameters) = segment.parameters {
self.visit_segment_parameters(path.def, depth, parameters);
}
}
}
fn visit_fn_decl(&mut self, fd: &'tcx hir::FnDecl) {
let output = match fd.output {
hir::DefaultReturn(_) => None,
hir::Return(ref ty) => Some(ty)
};
self.visit_fn_like_elision(&fd.inputs, output);
}
fn visit_generics(&mut self, generics: &'tcx hir::Generics) {
check_mixed_explicit_and_in_band_defs(&self.sess, &generics.lifetimes);
for ty_param in generics.ty_params.iter() {
walk_list!(self, visit_ty_param_bound, &ty_param.bounds);
if let Some(ref ty) = ty_param.default {
self.visit_ty(&ty);
}
}
for predicate in &generics.where_clause.predicates {
match predicate {
&hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate{ ref bounded_ty,
ref bounds,
ref bound_lifetimes,
.. }) => {
if !bound_lifetimes.is_empty() {
self.trait_ref_hack = true;
let next_early_index = self.next_early_index();
let scope = Scope::Binder {
lifetimes: bound_lifetimes.iter().map(|def| {
Region::late(self.hir_map, def)
}).collect(),
next_early_index,
s: self.scope
};
let result = self.with(scope, |old_scope, this| {
this.check_lifetime_defs(old_scope, bound_lifetimes);
this.visit_ty(&bounded_ty);
walk_list!(this, visit_ty_param_bound, bounds);
});
self.trait_ref_hack = false;
result
} else {
self.visit_ty(&bounded_ty);
walk_list!(self, visit_ty_param_bound, bounds);
}
}
&hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate{ref lifetime,
ref bounds,
.. }) => {
self.visit_lifetime(lifetime);
for bound in bounds {
self.visit_lifetime(bound);
}
}
&hir::WherePredicate::EqPredicate(hir::WhereEqPredicate{ref lhs_ty,
ref rhs_ty,
.. }) => {
self.visit_ty(lhs_ty);
self.visit_ty(rhs_ty);
}
}
}
}
fn visit_poly_trait_ref(&mut self,
trait_ref: &'tcx hir::PolyTraitRef,
_modifier: hir::TraitBoundModifier) {
debug!("visit_poly_trait_ref trait_ref={:?}", trait_ref);
if !self.trait_ref_hack || !trait_ref.bound_lifetimes.is_empty() {
if self.trait_ref_hack {
span_err!(self.sess, trait_ref.span, E0316,
"nested quantification of lifetimes");
}
let next_early_index = self.next_early_index();
let scope = Scope::Binder {
lifetimes: trait_ref.bound_lifetimes.iter().map(|def| {
Region::late(self.hir_map, def)
}).collect(),
next_early_index,
s: self.scope
};
self.with(scope, |old_scope, this| {
this.check_lifetime_defs(old_scope, &trait_ref.bound_lifetimes);
for lifetime in &trait_ref.bound_lifetimes {
this.visit_lifetime_def(lifetime);
}
this.visit_trait_ref(&trait_ref.trait_ref)
})
} else {
self.visit_trait_ref(&trait_ref.trait_ref)
}
}
}
#[derive(Copy, Clone, PartialEq)]
enum ShadowKind { Label, Lifetime }
struct Original { kind: ShadowKind, span: Span }
struct Shadower { kind: ShadowKind, span: Span }
fn original_label(span: Span) -> Original {
Original { kind: ShadowKind::Label, span: span }
}
fn shadower_label(span: Span) -> Shadower {
Shadower { kind: ShadowKind::Label, span: span }
}
fn original_lifetime(span: Span) -> Original {
Original { kind: ShadowKind::Lifetime, span: span }
}
fn shadower_lifetime(l: &hir::Lifetime) -> Shadower {
Shadower { kind: ShadowKind::Lifetime, span: l.span }
}
impl ShadowKind {
fn desc(&self) -> &'static str {
match *self {
ShadowKind::Label => "label",
ShadowKind::Lifetime => "lifetime",
}
}
}
fn check_mixed_explicit_and_in_band_defs(
sess: &Session,
lifetime_defs: &[hir::LifetimeDef],
) {
let oob_def = lifetime_defs.iter().find(|lt| !lt.in_band);
let in_band_def = lifetime_defs.iter().find(|lt| lt.in_band);
if let (Some(oob_def), Some(in_band_def)) = (oob_def, in_band_def) {
struct_span_err!(sess, in_band_def.lifetime.span, E0688,
"cannot mix in-band and explicit lifetime definitions")
.span_label(in_band_def.lifetime.span, "in-band lifetime definition here")
.span_label(oob_def.lifetime.span, "explicit lifetime definition here")
.emit();
}
}
fn signal_shadowing_problem(sess: &Session, name: ast::Name, orig: Original, shadower: Shadower) {
let mut err = if let (ShadowKind::Lifetime, ShadowKind::Lifetime) = (orig.kind, shadower.kind) {
// lifetime/lifetime shadowing is an error
struct_span_err!(sess, shadower.span, E0496,
"{} name `{}` shadows a \
{} name that is already in scope",
shadower.kind.desc(), name, orig.kind.desc())
} else {
// shadowing involving a label is only a warning, due to issues with
// labels and lifetimes not being macro-hygienic.
sess.struct_span_warn(shadower.span,
&format!("{} name `{}` shadows a \
{} name that is already in scope",
shadower.kind.desc(), name, orig.kind.desc()))
};
err.span_label(orig.span, "first declared here");
err.span_label(shadower.span,
format!("lifetime {} already in scope", name));
err.emit();
}
// Adds all labels in `body` to `ctxt.labels_in_fn`, signalling a warning
// if one of the labels shadows a lifetime or another label.
fn extract_labels(ctxt: &mut LifetimeContext, body: &hir::Body) {
struct GatherLabels<'a, 'tcx: 'a> {
sess: &'a Session,
hir_map: &'a Map<'tcx>,
scope: ScopeRef<'a>,
labels_in_fn: &'a mut Vec<(ast::Name, Span)>,
}
let mut gather = GatherLabels {
sess: ctxt.sess,
hir_map: ctxt.hir_map,
scope: ctxt.scope,
labels_in_fn: &mut ctxt.labels_in_fn,
};
gather.visit_body(body);
impl<'v, 'a, 'tcx> Visitor<'v> for GatherLabels<'a, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> {
NestedVisitorMap::None
}
fn visit_expr(&mut self, ex: &hir::Expr) {
if let Some((label, label_span)) = expression_label(ex) {
for &(prior, prior_span) in &self.labels_in_fn[..] {
// FIXME (#24278): non-hygienic comparison
if label == prior {
signal_shadowing_problem(self.sess,
label,
original_label(prior_span),
shadower_label(label_span));
}
}
check_if_label_shadows_lifetime(self.sess,
self.hir_map,
self.scope,
label,
label_span);
self.labels_in_fn.push((label, label_span));
}
intravisit::walk_expr(self, ex)
}
}
fn expression_label(ex: &hir::Expr) -> Option<(ast::Name, Span)> {
match ex.node {
hir::ExprWhile(.., Some(label)) |
hir::ExprLoop(_, Some(label), _) => Some((label.node, label.span)),
_ => None,
}
}
fn check_if_label_shadows_lifetime<'a>(sess: &'a Session,
hir_map: &Map,
mut scope: ScopeRef<'a>,
label: ast::Name,
label_span: Span) {
loop {
match *scope {
Scope::Body { s, .. } |
Scope::Elision { s, .. } |
Scope::ObjectLifetimeDefault { s, .. } => { scope = s; }
Scope::Root => { return; }
Scope::Binder { ref lifetimes, s, next_early_index: _ } => {
// FIXME (#24278): non-hygienic comparison
if let Some(def) = lifetimes.get(&hir::LifetimeName::Name(label)) {
let node_id = hir_map.as_local_node_id(def.id().unwrap())
.unwrap();
signal_shadowing_problem(
sess,
label,
original_lifetime(hir_map.span(node_id)),
shadower_label(label_span));
return;
}
scope = s;
}
}
}
}
}
fn compute_object_lifetime_defaults(sess: &Session, hir_map: &Map)
-> NodeMap<Vec<ObjectLifetimeDefault>> {
let mut map = NodeMap();
for item in hir_map.krate().items.values() {
match item.node {
hir::ItemStruct(_, ref generics) |
hir::ItemUnion(_, ref generics) |
hir::ItemEnum(_, ref generics) |
hir::ItemTy(_, ref generics) |
hir::ItemTrait(_, _, ref generics, ..) => {
let result = object_lifetime_defaults_for_item(hir_map, generics);
// Debugging aid.
if attr::contains_name(&item.attrs, "rustc_object_lifetime_default") {
let object_lifetime_default_reprs: String =
result.iter().map(|set| {
match *set {
Set1::Empty => "BaseDefault".to_string(),
Set1::One(Region::Static) => "'static".to_string(),
Set1::One(Region::EarlyBound(i, _, _)) => {
generics.lifetimes[i as usize].lifetime.name.name().to_string()
}
Set1::One(_) => bug!(),
Set1::Many => "Ambiguous".to_string(),
}
}).collect::<Vec<String>>().join(",");
sess.span_err(item.span, &object_lifetime_default_reprs);
}
map.insert(item.id, result);
}
_ => {}
}
}
map
}
/// Scan the bounds and where-clauses on parameters to extract bounds
/// of the form `T:'a` so as to determine the `ObjectLifetimeDefault`
/// for each type parameter.
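///
/// For example (illustrative): given `struct Ref<'a, T: 'a>(&'a T);`,
/// the default for `T` is `Set1::One('a)`; given a parameter bounded by
/// two regions, as in `T: 'a + 'b`, the result is `Set1::Many` (ambiguous).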
fn object_lifetime_defaults_for_item(hir_map: &Map, generics: &hir::Generics)
-> Vec<ObjectLifetimeDefault> {
fn add_bounds(set: &mut Set1<hir::LifetimeName>, bounds: &[hir::TyParamBound]) {
for bound in bounds {
if let hir::RegionTyParamBound(ref lifetime) = *bound {
set.insert(lifetime.name);
}
}
}
generics.ty_params.iter().map(|param| {
let mut set = Set1::Empty;
add_bounds(&mut set, ¶m.bounds);
let param_def_id = hir_map.local_def_id(param.id);
for predicate in &generics.where_clause.predicates {
// Look for `type: ...` where clauses.
let data = match *predicate {
hir::WherePredicate::BoundPredicate(ref data) => data,
_ => continue
};
// Ignore `for<'a> type: ...` as they can change what
// lifetimes mean (although we could "just" handle it).
if !data.bound_lifetimes.is_empty() {
continue;
}
let def = match data.bounded_ty.node {
hir::TyPath(hir::QPath::Resolved(None, ref path)) => path.def,
_ => continue
};
if def == Def::TyParam(param_def_id) {
add_bounds(&mut set, &data.bounds);
}
}
match set {
Set1::Empty => Set1::Empty,
Set1::One(name) => {
if name == hir::LifetimeName::Static {
Set1::One(Region::Static)
} else {
generics.lifetimes.iter().enumerate().find(|&(_, def)| {
def.lifetime.name == name
}).map_or(Set1::Many, |(i, def)| {
let def_id = hir_map.local_def_id(def.lifetime.id);
let origin = LifetimeDefOrigin::from_is_in_band(def.in_band);
Set1::One(Region::EarlyBound(i as u32, def_id, origin))
})
}
}
Set1::Many => Set1::Many
}
}).collect()
}
impl<'a, 'tcx> LifetimeContext<'a, 'tcx> {
// FIXME(#37666) this works around a limitation in the region inferencer
fn hack<F>(&mut self, f: F) where
F: for<'b> FnOnce(&mut LifetimeContext<'b, 'tcx>),
{
f(self)
}
fn with<F>(&mut self, wrap_scope: Scope, f: F) where
F: for<'b> FnOnce(ScopeRef, &mut LifetimeContext<'b, 'tcx>),
{
let LifetimeContext {sess, cstore, hir_map, ref mut map, ..} = *self;
let labels_in_fn = replace(&mut self.labels_in_fn, vec![]);
let xcrate_object_lifetime_defaults =
replace(&mut self.xcrate_object_lifetime_defaults, DefIdMap());
let mut this = LifetimeContext {
sess,
cstore,
hir_map,
map: *map,
scope: &wrap_scope,
trait_ref_hack: self.trait_ref_hack,
is_in_fn_syntax: self.is_in_fn_syntax,
labels_in_fn,
xcrate_object_lifetime_defaults,
};
debug!("entering scope {:?}", this.scope);
f(self.scope, &mut this);
debug!("exiting scope {:?}", this.scope);
self.labels_in_fn = this.labels_in_fn;
self.xcrate_object_lifetime_defaults = this.xcrate_object_lifetime_defaults;
}
/// Visits self by adding a scope and handling recursive walk over the contents with `walk`.
///
/// Handles visiting fns and methods. These are a bit complicated because we must distinguish
/// early- vs late-bound lifetime parameters. We do this by checking which lifetimes appear
/// within type bounds; those are early bound lifetimes, and the rest are late bound.
///
/// For example:
///
/// fn foo<'a,'b,'c,T:Trait<'b>>(...)
///
/// Here `'a` and `'c` are late bound but `'b` is early bound. Note that early- and late-bound
/// lifetimes may be interspersed together.
///
/// If early bound lifetimes are present, we separate them into their own list (and likewise
/// for late bound). They will be numbered sequentially, starting from the lowest index that is
/// already in scope (for a fn item, that will be 0, but for a method it might not be). Late
/// bound lifetimes are resolved by name and associated with a binder id (`binder_id`), so the
/// ordering is not important there.
fn visit_early_late<F>(&mut self,
parent_id: Option<ast::NodeId>,
decl: &'tcx hir::FnDecl,
generics: &'tcx hir::Generics,
walk: F) where
F: for<'b, 'c> FnOnce(&'b mut LifetimeContext<'c, 'tcx>),
{
insert_late_bound_lifetimes(self.map, decl, generics);
// Find the start of nested early scopes, e.g. in methods.
let mut index = 0;
if let Some(parent_id) = parent_id {
let parent = self.hir_map.expect_item(parent_id);
if let hir::ItemTrait(..) = parent.node {
index += 1; // Self comes first.
}
match parent.node {
hir::ItemTrait(_, _, ref generics, ..) |
hir::ItemImpl(_, _, _, ref generics, ..) => {
index += (generics.lifetimes.len() + generics.ty_params.len()) as u32;
}
_ => {}
}
}
let lifetimes = generics.lifetimes.iter().map(|def| {
if self.map.late_bound.contains(&def.lifetime.id) {
Region::late(self.hir_map, def)
} else {
Region::early(self.hir_map, &mut index, def)
}
}).collect();
let next_early_index = index + generics.ty_params.len() as u32;
let scope = Scope::Binder {
lifetimes,
next_early_index,
s: self.scope
};
self.with(scope, move |old_scope, this| {
this.check_lifetime_defs(old_scope, &generics.lifetimes);
this.hack(walk); // FIXME(#37666) workaround in place of `walk(this)`
});
}
/// Returns the next index one would use for an early-bound-region
/// if extending the current scope.
fn next_early_index(&self) -> u32 {
let mut scope = self.scope;
loop {
match *scope {
Scope::Root =>
return 0,
Scope::Binder { next_early_index, .. } =>
return next_early_index,
Scope::Body { s, .. } |
Scope::Elision { s, .. } |
Scope::ObjectLifetimeDefault { s, .. } =>
scope = s,
}
}
}
fn resolve_lifetime_ref(&mut self, lifetime_ref: &hir::Lifetime) {
debug!("resolve_lifetime_ref(lifetime_ref={:?})", lifetime_ref);
// Walk up the scope chain, tracking the number of fn scopes
// that we pass through, until we find a lifetime with the
// given name or we run out of scopes.
let mut late_depth = 0;
let mut scope = self.scope;
let mut outermost_body = None;
let result = loop {
match *scope {
Scope::Body { id, s } => {
outermost_body = Some(id);
scope = s;
}
Scope::Root => {
break None;
}
Scope::Binder { ref lifetimes, s, next_early_index: _ } => {
if let Some(&def) = lifetimes.get(&lifetime_ref.name) {
break Some(def.shifted(late_depth));
} else {
late_depth += 1;
scope = s;
}
}
Scope::Elision { s, .. } |
Scope::ObjectLifetimeDefault { s, .. } => {
scope = s;
}
}
};
if let Some(mut def) = result {
if let Region::EarlyBound(..) = def {
// Do not free early-bound regions, only late-bound ones.
} else if let Some(body_id) = outermost_body {
let fn_id = self.hir_map.body_owner(body_id);
match self.hir_map.get(fn_id) {
hir::map::NodeItem(&hir::Item {
node: hir::ItemFn(..), ..
}) |
hir::map::NodeTraitItem(&hir::TraitItem {
node: hir::TraitItemKind::Method(..), ..
}) |
hir::map::NodeImplItem(&hir::ImplItem {
node: hir::ImplItemKind::Method(..), ..
}) => {
let scope = self.hir_map.local_def_id(fn_id);
def = Region::Free(scope, def.id().unwrap());
}
_ => {}
}
}
// Check for fn-syntax conflicts with in-band lifetime definitions
if self.is_in_fn_syntax {
match def {
Region::EarlyBound(_, _, LifetimeDefOrigin::InBand) |
Region::LateBound(_, _, LifetimeDefOrigin::InBand) => {
struct_span_err!(self.sess, lifetime_ref.span, E0687,
"lifetimes used in `fn` or `Fn` syntax must be \
explicitly declared using `<...>` binders")
.span_label(lifetime_ref.span,
"in-band lifetime definition")
.emit();
},
Region::Static |
Region::EarlyBound(_, _, LifetimeDefOrigin::Explicit) |
Region::LateBound(_, _, LifetimeDefOrigin::Explicit) |
Region::LateBoundAnon(..) |
Region::Free(..) => {}
}
}
self.insert_lifetime(lifetime_ref, def);
} else {
struct_span_err!(self.sess, lifetime_ref.span, E0261,
"use of undeclared lifetime name `{}`", lifetime_ref.name.name())
.span_label(lifetime_ref.span, "undeclared lifetime")
.emit();
}
}
fn visit_segment_parameters(&mut self,
def: Def,
depth: usize,
params: &'tcx hir::PathParameters) {
if params.parenthesized {
let was_in_fn_syntax = self.is_in_fn_syntax;
self.is_in_fn_syntax = true;
self.visit_fn_like_elision(params.inputs(), Some(¶ms.bindings[0].ty));
self.is_in_fn_syntax = was_in_fn_syntax;
return;
}
if params.lifetimes.iter().all(|l| l.is_elided()) {
self.resolve_elided_lifetimes(¶ms.lifetimes);
} else {
for l in ¶ms.lifetimes { self.visit_lifetime(l); }
}
// Figure out if this is a type/trait segment,
// which requires object lifetime defaults.
let parent_def_id = |this: &mut Self, def_id: DefId| {
let def_key = if def_id.is_local() {
this.hir_map.def_key(def_id)
} else {
this.cstore.def_key(def_id)
};
DefId {
krate: def_id.krate,
index: def_key.parent.expect("missing parent")
}
};
let type_def_id = match def {
Def::AssociatedTy(def_id) if depth == 1 => {
Some(parent_def_id(self, def_id))
}
Def::Variant(def_id) if depth == 0 => {
Some(parent_def_id(self, def_id))
}
Def::Struct(def_id) |
Def::Union(def_id) |
Def::Enum(def_id) |
Def::TyAlias(def_id) |
Def::Trait(def_id) if depth == 0 => Some(def_id),
_ => None
};
let object_lifetime_defaults = type_def_id.map_or(vec![], |def_id| {
let in_body = {
let mut scope = self.scope;
loop {
match *scope {
Scope::Root => break false,
Scope::Body { .. } => break true,
Scope::Binder { s, .. } |
Scope::Elision { s, .. } |
Scope::ObjectLifetimeDefault { s, .. } => {
scope = s;
}
}
}
};
let map = &self.map;
let unsubst = if let Some(id) = self.hir_map.as_local_node_id(def_id) {
&map.object_lifetime_defaults[&id]
} else {
let cstore = self.cstore;
let sess = self.sess;
self.xcrate_object_lifetime_defaults.entry(def_id).or_insert_with(|| {
cstore.item_generics_cloned_untracked(def_id, sess)
.types
.into_iter()
.map(|def| {
def.object_lifetime_default
}).collect()
})
};
unsubst.iter().map(|set| {
match *set {
Set1::Empty => {
if in_body {
None
} else {
Some(Region::Static)
}
}
Set1::One(r) => r.subst(¶ms.lifetimes, map),
Set1::Many => None
}
}).collect()
});
for (i, ty) in params.types.iter().enumerate() {
if let Some(<) = object_lifetime_defaults.get(i) {
let scope = Scope::ObjectLifetimeDefault {
lifetime: lt,
s: self.scope
};
self.with(scope, |_, this| this.visit_ty(ty));
} else {
self.visit_ty(ty);
}
}
for b in ¶ms.bindings { self.visit_assoc_type_binding(b); }
}
fn visit_fn_like_elision(&mut self, inputs: &'tcx [P<hir::Ty>],
output: Option<&'tcx P<hir::Ty>>) {
let mut arg_elide = Elide::FreshLateAnon(Cell::new(0));
let arg_scope = Scope::Elision {
elide: arg_elide.clone(),
s: self.scope
};
self.with(arg_scope, |_, this| {
for input in inputs {
this.visit_ty(input);
}
match *this.scope {
Scope::Elision { ref elide, .. } => {
arg_elide = elide.clone();
}
_ => bug!()
}
});
let output = match output {
Some(ty) => ty,
None => return
};
// Figure out if there's a body we can get argument names from,
// and whether there's a `self` argument (treated specially).
let mut assoc_item_kind = None;
let mut impl_self = None;
let parent = self.hir_map.get_parent_node(output.id);
let body = match self.hir_map.get(parent) {
// `fn` definitions and methods.
hir::map::NodeItem(&hir::Item {
node: hir::ItemFn(.., body), ..
}) => Some(body),
hir::map::NodeTraitItem(&hir::TraitItem {
node: hir::TraitItemKind::Method(_, ref m), ..
}) => {
match self.hir_map.expect_item(self.hir_map.get_parent(parent)).node {
hir::ItemTrait(.., ref trait_items) => {
assoc_item_kind = trait_items.iter().find(|ti| ti.id.node_id == parent)
.map(|ti| ti.kind);
}
_ => {}
}
match *m {
hir::TraitMethod::Required(_) => None,
hir::TraitMethod::Provided(body) => Some(body),
}
}
hir::map::NodeImplItem(&hir::ImplItem {
node: hir::ImplItemKind::Method(_, body), ..
}) => {
match self.hir_map.expect_item(self.hir_map.get_parent(parent)).node {
hir::ItemImpl(.., ref self_ty, ref impl_items) => {
impl_self = Some(self_ty);
assoc_item_kind = impl_items.iter().find(|ii| ii.id.node_id == parent)
.map(|ii| ii.kind);
}
_ => {}
}
Some(body)
}
// Foreign functions, `fn(...) -> R` and `Trait(...) -> R` (both types and bounds).
hir::map::NodeForeignItem(_) | hir::map::NodeTy(_) | hir::map::NodeTraitRef(_) => None,
// Everything else (only closures?) doesn't
// actually enjoy elision in return types.
_ => {
self.visit_ty(output);
return;
}
};
let has_self = match assoc_item_kind {
Some(hir::AssociatedItemKind::Method { has_self }) => has_self,
_ => false
};
// In accordance with the rules for lifetime elision, we can determine
// what region to use for elision in the output type in two ways.
// First (determined here), if `self` is by-reference, then the
// implied output region is the region of the self parameter.
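        // For example (illustrative): `fn get(&self, key: &str) -> &str`
        // behaves like `fn get<'a, 'b>(&'a self, key: &'b str) -> &'a str`.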
if has_self {
// Look for `self: &'a Self` - also desugared from `&'a self`,
// and if that matches, use it for elision and return early.
let is_self_ty = |def: Def| {
if let Def::SelfTy(..) = def {
return true;
}
// Can't always rely on literal (or implied) `Self` due
// to the way elision rules were originally specified.
let impl_self = impl_self.map(|ty| &ty.node);
if let Some(&hir::TyPath(hir::QPath::Resolved(None, ref path))) = impl_self {
match path.def {
// Whitelist the types that unambiguously always
// result in the same type constructor being used
// (it can't differ between `Self` and `self`).
Def::Struct(_) |
Def::Union(_) |
Def::Enum(_) |
Def::PrimTy(_) => return def == path.def,
_ => {}
}
}
false
};
if let hir::TyRptr(lifetime_ref, ref mt) = inputs[0].node {
if let hir::TyPath(hir::QPath::Resolved(None, ref path)) = mt.ty.node {
if is_self_ty(path.def) {
if let Some(&lifetime) = self.map.defs.get(&lifetime_ref.id) {
let scope = Scope::Elision {
elide: Elide::Exact(lifetime),
s: self.scope
};
self.with(scope, |_, this| this.visit_ty(output));
return;
}
}
}
}
}
// Second, if there was exactly one lifetime (either a substitution or a
// reference) in the arguments, then any anonymous regions in the output
// have that lifetime.
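        // For example (illustrative): `fn first(v: &Vec<u32>) -> &u32`
        // behaves like `fn first<'a>(v: &'a Vec<u32>) -> &'a u32`.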
let mut possible_implied_output_region = None;
let mut lifetime_count = 0;
let arg_lifetimes = inputs.iter().enumerate().skip(has_self as usize).map(|(i, input)| {
let mut gather = GatherLifetimes {
map: self.map,
binder_depth: 1,
have_bound_regions: false,
lifetimes: FxHashSet()
};
gather.visit_ty(input);
lifetime_count += gather.lifetimes.len();
if lifetime_count == 1 && gather.lifetimes.len() == 1 {
// there's a chance that the unique lifetime of this
// iteration will be the appropriate lifetime for output
                // parameters, so let's store it.
possible_implied_output_region = gather.lifetimes.iter().cloned().next();
}
ElisionFailureInfo {
parent: body,
index: i,
lifetime_count: gather.lifetimes.len(),
have_bound_regions: gather.have_bound_regions
}
}).collect();
let elide = if lifetime_count == 1 {
Elide::Exact(possible_implied_output_region.unwrap())
} else {
Elide::Error(arg_lifetimes)
};
let scope = Scope::Elision {
elide,
s: self.scope
};
self.with(scope, |_, this| this.visit_ty(output));
struct GatherLifetimes<'a> {
map: &'a NamedRegionMap,
binder_depth: u32,
have_bound_regions: bool,
lifetimes: FxHashSet<Region>,
}
impl<'v, 'a> Visitor<'v> for GatherLifetimes<'a> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> {
NestedVisitorMap::None
}
fn visit_ty(&mut self, ty: &hir::Ty) {
if let hir::TyBareFn(_) = ty.node {
self.binder_depth += 1;
}
if let hir::TyTraitObject(ref bounds, ref lifetime) = ty.node {
for bound in bounds {
self.visit_poly_trait_ref(bound, hir::TraitBoundModifier::None);
}
// Stay on the safe side and don't include the object
// lifetime default (which may not end up being used).
if !lifetime.is_elided() {
self.visit_lifetime(lifetime);
}
} else {
intravisit::walk_ty(self, ty);
}
if let hir::TyBareFn(_) = ty.node {
self.binder_depth -= 1;
}
}
fn visit_poly_trait_ref(&mut self,
trait_ref: &hir::PolyTraitRef,
modifier: hir::TraitBoundModifier) {
self.binder_depth += 1;
intravisit::walk_poly_trait_ref(self, trait_ref, modifier);
self.binder_depth -= 1;
}
fn visit_lifetime_def(&mut self, lifetime_def: &hir::LifetimeDef) {
for l in &lifetime_def.bounds { self.visit_lifetime(l); }
}
fn visit_lifetime(&mut self, lifetime_ref: &hir::Lifetime) {
if let Some(&lifetime) = self.map.defs.get(&lifetime_ref.id) {
match lifetime {
Region::LateBound(debruijn, _, _) |
Region::LateBoundAnon(debruijn, _)
if debruijn.depth < self.binder_depth => {
self.have_bound_regions = true;
}
_ => {
self.lifetimes.insert(lifetime.from_depth(self.binder_depth));
}
}
}
}
}
}
fn resolve_elided_lifetimes(&mut self, lifetime_refs: &[hir::Lifetime]) {
if lifetime_refs.is_empty() {
return;
}
let span = lifetime_refs[0].span;
let mut late_depth = 0;
let mut scope = self.scope;
let error = loop {
match *scope {
// Do not assign any resolution, it will be inferred.
Scope::Body { .. } => return,
Scope::Root => break None,
Scope::Binder { s, .. } => {
late_depth += 1;
scope = s;
}
Scope::Elision { ref elide, .. } => {
let lifetime = match *elide {
Elide::FreshLateAnon(ref counter) => {
for lifetime_ref in lifetime_refs {
let lifetime = Region::late_anon(counter).shifted(late_depth);
self.insert_lifetime(lifetime_ref, lifetime);
}
return;
}
Elide::Exact(l) => l.shifted(late_depth),
Elide::Error(ref e) => break Some(e)
};
for lifetime_ref in lifetime_refs {
self.insert_lifetime(lifetime_ref, lifetime);
}
return;
}
Scope::ObjectLifetimeDefault { s, .. } => {
scope = s;
}
}
};
let mut err = struct_span_err!(self.sess, span, E0106,
"missing lifetime specifier{}",
if lifetime_refs.len() > 1 { "s" } else { "" });
let msg = if lifetime_refs.len() > 1 {
format!("expected {} lifetime parameters", lifetime_refs.len())
} else {
format!("expected lifetime parameter")
};
err.span_label(span, msg);
if let Some(params) = error {
if lifetime_refs.len() == 1 {
self.report_elision_failure(&mut err, params);
}
}
err.emit();
}
fn report_elision_failure(&mut self,
db: &mut DiagnosticBuilder,
params: &[ElisionFailureInfo]) {
let mut m = String::new();
let len = params.len();
let elided_params: Vec<_> = params.iter().cloned()
.filter(|info| info.lifetime_count > 0)
.collect();
let elided_len = elided_params.len();
for (i, info) in elided_params.into_iter().enumerate() {
let ElisionFailureInfo {
parent, index, lifetime_count: n, have_bound_regions
} = info;
let help_name = if let Some(body) = parent {
let arg = &self.hir_map.body(body).arguments[index];
format!("`{}`", self.hir_map.node_to_pretty_string(arg.pat.id))
} else {
format!("argument {}", index + 1)
};
m.push_str(&(if n == 1 {
help_name
} else {
format!("one of {}'s {} {}lifetimes", help_name, n,
if have_bound_regions { "free " } else { "" } )
})[..]);
if elided_len == 2 && i == 0 {
m.push_str(" or ");
} else if i + 2 == elided_len {
m.push_str(", or ");
} else if i != elided_len - 1 {
m.push_str(", ");
}
}
if len == 0 {
help!(db,
"this function's return type contains a borrowed value, but \
there is no value for it to be borrowed from");
help!(db,
"consider giving it a 'static lifetime");
} else if elided_len == 0 {
help!(db,
"this function's return type contains a borrowed value with \
an elided lifetime, but the lifetime cannot be derived from \
the arguments");
help!(db,
"consider giving it an explicit bounded or 'static \
lifetime");
} else if elided_len == 1 {
help!(db,
"this function's return type contains a borrowed value, but \
the signature does not say which {} it is borrowed from",
m);
} else {
help!(db,
"this function's return type contains a borrowed value, but \
the signature does not say whether it is borrowed from {}",
m);
}
}
fn resolve_object_lifetime_default(&mut self, lifetime_ref: &hir::Lifetime) {
let mut late_depth = 0;
let mut scope = self.scope;
let lifetime = loop {
match *scope {
Scope::Binder { s, .. } => {
late_depth += 1;
scope = s;
}
Scope::Root |
Scope::Elision { .. } => break Region::Static,
Scope::Body { .. } |
Scope::ObjectLifetimeDefault { lifetime: None, .. } => return,
Scope::ObjectLifetimeDefault { lifetime: Some(l), .. } => break l
}
};
self.insert_lifetime(lifetime_ref, lifetime.shifted(late_depth));
}
fn check_lifetime_defs(&mut self, old_scope: ScopeRef, lifetimes: &[hir::LifetimeDef]) {
for i in 0..lifetimes.len() {
let lifetime_i = &lifetimes[i];
for lifetime in lifetimes {
match lifetime.lifetime.name {
hir::LifetimeName::Static | hir::LifetimeName::Underscore => {
let lifetime = lifetime.lifetime;
let name = lifetime.name.name();
let mut err = struct_span_err!(self.sess, lifetime.span, E0262,
"invalid lifetime parameter name: `{}`", name);
err.span_label(lifetime.span,
format!("{} is a reserved lifetime name", name));
err.emit();
}
hir::LifetimeName::Implicit | hir::LifetimeName::Name(_) => {}
}
}
// It is a hard error to shadow a lifetime within the same scope.
for j in i + 1..lifetimes.len() {
let lifetime_j = &lifetimes[j];
if lifetime_i.lifetime.name == lifetime_j.lifetime.name {
struct_span_err!(self.sess, lifetime_j.lifetime.span, E0263,
"lifetime name `{}` declared twice in the same scope",
lifetime_j.lifetime.name.name())
.span_label(lifetime_j.lifetime.span,
"declared twice")
.span_label(lifetime_i.lifetime.span,
"previous declaration here")
.emit();
}
}
// It is a soft error to shadow a lifetime within a parent scope.
self.check_lifetime_def_for_shadowing(old_scope, &lifetime_i.lifetime);
for bound in &lifetime_i.bounds {
match bound.name {
hir::LifetimeName::Underscore => {
let mut err = struct_span_err!(self.sess, bound.span, E0637,
"invalid lifetime bound name: `'_`");
err.span_label(bound.span, "`'_` is a reserved lifetime name");
err.emit();
}
hir::LifetimeName::Static => {
self.insert_lifetime(bound, Region::Static);
self.sess.struct_span_warn(lifetime_i.lifetime.span.to(bound.span),
&format!("unnecessary lifetime parameter `{}`",
lifetime_i.lifetime.name.name()))
.help(&format!(
"you can use the `'static` lifetime directly, in place \
of `{}`", lifetime_i.lifetime.name.name()))
.emit();
}
hir::LifetimeName::Implicit |
hir::LifetimeName::Name(_) => {
self.resolve_lifetime_ref(bound);
}
}
}
}
}
fn check_lifetime_def_for_shadowing(&self,
mut old_scope: ScopeRef,
lifetime: &hir::Lifetime)
{
for &(label, label_span) in &self.labels_in_fn {
// FIXME (#24278): non-hygienic comparison
if lifetime.name.name() == label {
signal_shadowing_problem(self.sess,
label,
original_label(label_span),
shadower_lifetime(&lifetime));
return;
}
}
loop {
match *old_scope {
Scope::Body { s, .. } |
Scope::Elision { s, .. } |
Scope::ObjectLifetimeDefault { s, .. } => {
old_scope = s;
}
Scope::Root => {
return;
}
Scope::Binder { ref lifetimes, s, next_early_index: _ } => {
if let Some(&def) = lifetimes.get(&lifetime.name) {
let node_id = self.hir_map
.as_local_node_id(def.id().unwrap())
.unwrap();
signal_shadowing_problem(
self.sess,
lifetime.name.name(),
original_lifetime(self.hir_map.span(node_id)),
shadower_lifetime(&lifetime));
return;
}
old_scope = s;
}
}
}
}
fn insert_lifetime(&mut self,
lifetime_ref: &hir::Lifetime,
def: Region) {
if lifetime_ref.id == ast::DUMMY_NODE_ID {
span_bug!(lifetime_ref.span,
"lifetime reference not renumbered, \
probably a bug in syntax::fold");
}
debug!("insert_lifetime: {} resolved to {:?} span={:?}",
self.hir_map.node_to_string(lifetime_ref.id),
def,
self.sess.codemap().span_to_string(lifetime_ref.span));
self.map.defs.insert(lifetime_ref.id, def);
}
}
///////////////////////////////////////////////////////////////////////////
/// Detects late-bound lifetimes and inserts them into
/// `map.late_bound`.
///
/// A region declared on a fn is **late-bound** if:
/// - it is constrained by an argument type;
/// - it does not appear in a where-clause.
///
/// "Constrained" basically means that it appears in any type but
/// not amongst the inputs to a projection. In other words, `<&'a
/// T as Trait<'b>>::Foo` does not constrain `'a` or `'b`.
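///
/// For example (illustrative): in `fn f<'a, 'b: 'static>(x: &'a u32, y: &'b u32)`,
/// `'a` is late-bound (it is constrained by `x` and has no bounds), while
/// `'b` is early-bound because the `'b: 'static` bound counts as a
/// where-clause appearance.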
fn insert_late_bound_lifetimes(map: &mut NamedRegionMap,
decl: &hir::FnDecl,
generics: &hir::Generics) {
debug!("insert_late_bound_lifetimes(decl={:?}, generics={:?})", decl, generics);
let mut constrained_by_input = ConstrainedCollector { regions: FxHashSet() };
for arg_ty in &decl.inputs {
constrained_by_input.visit_ty(arg_ty);
}
let mut appears_in_output = AllCollector {
regions: FxHashSet(),
};
intravisit::walk_fn_ret_ty(&mut appears_in_output, &decl.output);
debug!("insert_late_bound_lifetimes: constrained_by_input={:?}",
constrained_by_input.regions);
// Walk the lifetimes that appear in where clauses.
//
// Subtle point: because we disallow nested bindings, we can just
// ignore binders here and scrape up all names we see.
let mut appears_in_where_clause = AllCollector {
regions: FxHashSet(),
};
for ty_param in generics.ty_params.iter() {
walk_list!(&mut appears_in_where_clause,
visit_ty_param_bound,
&ty_param.bounds);
}
walk_list!(&mut appears_in_where_clause,
visit_where_predicate,
&generics.where_clause.predicates);
    // We also need to collect the lifetimes that appear in argument
    // `impl Trait` types; we do so here.
walk_list!(&mut appears_in_where_clause,
visit_ty,
decl.inputs.iter().filter(|ty| {
if let hir::TyImplTraitUniversal(..) = ty.node {
true
} else {
false
}
}));
for lifetime_def in &generics.lifetimes {
if !lifetime_def.bounds.is_empty() {
// `'a: 'b` means both `'a` and `'b` are referenced
appears_in_where_clause.visit_lifetime_def(lifetime_def);
}
}
debug!("insert_late_bound_lifetimes: appears_in_where_clause={:?}",
appears_in_where_clause.regions);
// Late bound regions are those that:
// - appear in the inputs
// - do not appear in the where-clauses
// - are not implicitly captured by `impl Trait`
for lifetime in &generics.lifetimes {
let name = lifetime.lifetime.name;
// appears in the where clauses? early-bound.
if appears_in_where_clause.regions.contains(&name) { continue; }
// does not appear in the inputs, but appears in the return type? early-bound.
if !constrained_by_input.regions.contains(&name) &&
appears_in_output.regions.contains(&name) {
continue;
}
debug!("insert_late_bound_lifetimes: \
lifetime {:?} with id {:?} is late-bound",
lifetime.lifetime.name, lifetime.lifetime.id);
let inserted = map.late_bound.insert(lifetime.lifetime.id);
assert!(inserted, "visited lifetime {:?} twice", lifetime.lifetime.id);
}
return;
struct ConstrainedCollector {
regions: FxHashSet<hir::LifetimeName>,
}
impl<'v> Visitor<'v> for ConstrainedCollector {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> {
NestedVisitorMap::None
}
fn visit_ty(&mut self, ty: &'v hir::Ty) {
match ty.node {
hir::TyPath(hir::QPath::Resolved(Some(_), _)) |
hir::TyPath(hir::QPath::TypeRelative(..)) => {
// ignore lifetimes appearing in associated type
// projections, as they are not *constrained*
// (defined above)
}
hir::TyPath(hir::QPath::Resolved(None, ref path)) => {
// consider only the lifetimes on the final
// segment; I am not sure it's even currently
// valid to have them elsewhere, but even if it
// is, those would be potentially inputs to
// projections
if let Some(last_segment) = path.segments.last() {
self.visit_path_segment(path.span, last_segment);
}
}
_ => {
intravisit::walk_ty(self, ty);
}
}
}
fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) {
self.regions.insert(lifetime_ref.name);
}
}
struct AllCollector {
regions: FxHashSet<hir::LifetimeName>,
}
impl<'v> Visitor<'v> for AllCollector {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> {
NestedVisitorMap::None
}
fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) {
self.regions.insert(lifetime_ref.name);
}
}
}
| 39.114133 | 100 | 0.495194 |
e4e24d61c72ef77e93d5bf05c02cb1b382ee10b9 | 1,818 | use async_trait::async_trait;
use deadqueue::unlimited::Queue;
use once_cell::sync::Lazy;
use std::sync::Arc;
mod delete_service;
mod plan_update;
mod update_service;
pub use delete_service::DeleteService;
pub use plan_update::PlanUpdate;
pub use update_service::UpdateService;
pub type JobQueue = Queue<Box<dyn Job>>;
static STATIC_INSTANCE: Lazy<Arc<JobQueue>> = Lazy::new(|| Arc::from(JobQueue::new()));
/// Dispatch a job to one of the processors
pub fn dispatch(job: impl Job + 'static) {
STATIC_INSTANCE.push(Box::new(job));
}
/// Retrieve an instance of the queue
pub fn instance() -> Arc<JobQueue> {
STATIC_INSTANCE.clone()
}
/// A job that can be run in a separate thread
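///
/// A minimal sketch of an implementation and dispatch (hypothetical;
/// `Noop` is not part of this module):
///
/// ```ignore
/// struct Noop;
///
/// #[async_trait]
/// impl Job for Noop {
///     async fn run(&self) { /* do the work */ }
///     fn name<'a>(&self) -> &'a str { "noop" }
/// }
///
/// dispatch(Noop); // pushed onto the shared queue for a worker to pop
/// ```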
#[async_trait]
pub trait Job: Send + Sync {
/// Run the job
async fn run(&self);
/// The name of the job
fn name<'a>(&self) -> &'a str;
}
/// Log an error and stop execution from within a job. A notification
/// with the specified event and args will also be sent.
///
/// This is intended to be called as a macro from within the job's function so
/// that extra arguments are automatically passed in.
///
/// **NOTE:** a status parameter is automatically added with the error.
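///
/// Hypothetical usage inside an async fn returning `()`, such as
/// `Job::run` (the event name and expressions are illustrative, not
/// from this crate):
///
/// ```ignore
/// let plan = fail_notify!(ServiceUpdated, name.clone();
///                         compute_plan(&name).await;
///                         "failed to compute update plan");
/// ```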
#[macro_export]
macro_rules! fail_notify {
($event:ident , $( $arg:expr ),* ; $result:expr ; $message:expr) => {
match $result {
Ok(v) => v,
Err(e) => {
use $crate::notifier::{notify, Event, State};
use std::error::Error;
match e.source() {
Some(s) => tracing::error!(error = %e, source = %s, $message),
None => tracing::error!(error = %e, $message),
}
notify(Event::$event( $( $arg ),*, State::Failure(e.to_string()) )).await;
return;
}
}
};
}
| 27.969231 | 90 | 0.60341 |
91f37376bb7e0aa164790435470a9836fa7aa66a | 8,695 | // Copyright 2017 Databricks, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Code to handle reading and representing .kube/config files.
use chrono::{DateTime, Local, TimeZone};
use chrono::offset::Utc;
use duct::cmd;
use serde_json::{self, Value};
use serde_yaml;
use std::cell::RefCell;
use std::error::Error;
use std::fs::File;
use std::io;
//use error::{KubeErrNo, KubeError};
//use kube::{Kluster, KlusterAuth};
//use certs::{get_cert, get_cert_from_pem, get_key_from_str, get_private_key};
/// Kubernetes cluster config
#[derive(Debug, Deserialize)]
pub struct Config {
pub clusters: Vec<Cluster>,
pub contexts: Vec<Context>,
pub users: Vec<User>,
}
impl Config {
pub fn from_file(path: &str) -> Result<Config, io::Error> {
let f = File::open(path)?;
serde_yaml::from_reader(f).map_err(|e| {
io::Error::new(
io::ErrorKind::Other,
format!("Couldn't read yaml in '{}': {}", path, e.description()),
)
})
}
}
#[derive(Debug, Deserialize)]
pub struct Cluster {
pub name: String,
#[serde(rename = "cluster")] pub conf: ClusterConf,
}
#[derive(Debug, Deserialize)]
pub struct ClusterConf {
#[serde(rename = "certificate-authority")] pub cert: Option<String>,
#[serde(rename = "certificate-authority-data")] pub cert_data: Option<String>,
#[serde(rename = "insecure-skip-tls-verify")] pub skip_tls: Option<bool>,
pub server: String,
}
#[derive(Debug, Deserialize)]
pub struct Context {
pub name: String,
#[serde(rename = "context")] pub conf: ContextConf,
}
#[derive(Debug, Deserialize)]
pub struct User {
pub name: String,
#[serde(rename = "user")] pub conf: UserConf,
}
/// This represents what we can find in a user in the actual config file (note the Deserialize).
/// Hence all the optional fields. At some point we should write a custom deserializer for this to
/// make it cleaner.
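///
/// A minimal sketch of deserializing one user entry (illustrative values;
/// missing optional fields come back as `None`):
///
/// ```ignore
/// let yaml = "token: abc123\nusername: admin";
/// let conf: UserConf = serde_yaml::from_str(yaml).unwrap();
/// assert_eq!(conf.token, Some("abc123".to_string()));
/// assert!(conf.client_cert.is_none());
/// ```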
#[derive(Debug, Deserialize, Clone)]
pub struct UserConf {
pub token: Option<String>,
#[serde(rename = "client-certificate")] pub client_cert: Option<String>,
#[serde(rename = "client-key")] pub client_key: Option<String>,
#[serde(rename = "client-certificate-data")] pub client_cert_data: Option<String>,
#[serde(rename = "client-key-data")] pub client_key_data: Option<String>,
pub username: Option<String>,
pub password: Option<String>,
#[serde(rename = "auth-provider")] pub auth_provider: Option<AuthProvider>,
}
#[derive(Debug, Deserialize, Clone)]
pub struct ContextConf {
pub cluster: String,
//#[serde(default = "default")]
pub namespace: Option<String>,
pub user: String,
}
// Classes to hold deserialized data for auth
#[derive(Debug, Deserialize, Clone)]
pub struct AuthProvider {
name: String,
pub token: RefCell<Option<String>>,
pub expiry: RefCell<Option<String>>,
pub config: AuthProviderConfig,
}
impl AuthProvider {
// Copy the token and expiry out of the config into the refcells
pub fn copy_up(&self) {
let mut token = self.token.borrow_mut();
if self.name == "oidc" {
*token = self.config.id_token.clone();
} else {
*token = self.config.access_token.clone();
}
let mut expiry = self.expiry.borrow_mut();
*expiry = self.config.expiry.clone();
}
// true if expiry is before now
fn check_dt<T: TimeZone>(&self, expiry: DateTime<T>) -> bool {
let etime = expiry.with_timezone(&Utc);
let now = Utc::now();
etime < now
}
fn is_expired(&self) -> bool {
if self.name == "oidc" {
return false;
}
let expiry = self.expiry.borrow();
match *expiry {
Some(ref e) => {
// Somehow google sometimes puts a date like "2018-03-31 22:22:01" in the config
// and other times like "2018-04-01T05:57:31Z", so we have to try both. wtf google.
if let Ok(expiry) = DateTime::parse_from_rfc3339(e) {
self.check_dt(expiry)
} else if let Ok(expiry) = Local.datetime_from_str(e, "%Y-%m-%d %H:%M:%S") {
self.check_dt(expiry)
} else {
true
}
}
None => {
println!("No expiry set, cannot validate if token is still valid");
false
}
}
}
// Turn a {.credential.expiry_key} type string into a serde_json pointer string like
// /credential/expiry_key
    fn make_pointer(&self, s: &str) -> String {
        // Strip the surrounding braces, then rejoin the `.`-separated
        // segments with `/`; the leading empty segment produces the
        // leading `/` of the JSON pointer.
        let inner = &s[1..s.len() - 1];
        inner.split('.').collect::<Vec<&str>>().join("/")
    }
fn update_token(&self, token: &mut Option<String>, expiry: &mut Option<String>) {
match self.config.cmd_path {
Some(ref conf_cmd) => {
let args = self.config
.cmd_args
.as_ref()
.map(|argstr| argstr.split_whitespace().collect())
.unwrap_or(vec![]);
match cmd(conf_cmd, &args).read() {
Ok(output) => {
let v: Value = serde_json::from_str(output.as_str()).unwrap();
let mut updated_token = false;
match self.config.token_key.as_ref() {
Some(ref tk) => {
let token_pntr = self.make_pointer(tk.as_str());
let extracted_token =
v.pointer(token_pntr.as_str()).and_then(|tv| tv.as_str());
*token = extracted_token.map(|t| t.to_owned());
updated_token = true;
}
None => {
println!("No token-key in auth-provider, cannot extract token");
}
}
if updated_token {
match self.config.expiry_key.as_ref() {
Some(ref ek) => {
let expiry_pntr = self.make_pointer(ek.as_str());
let extracted_expiry =
v.pointer(expiry_pntr.as_str()).and_then(|ev| ev.as_str());
*expiry = extracted_expiry.map(|e| e.to_owned());
}
None => {
println!(
"No expiry-key in config, will have to pull a new \
token on every command"
);
}
}
}
}
Err(e) => {
println!("Failed to run update command: {}", e);
}
}
}
None => {
println!("No update command specified, can't update");
}
}
}
/// Checks that we have a valid token, and if not, attempts to update it based on the config
pub fn ensure_token(&self) -> Option<String> {
let mut token = self.token.borrow_mut();
if token.is_none() || self.is_expired() {
// update
let mut expiry = self.expiry.borrow_mut();
*token = None;
self.update_token(&mut token, &mut expiry)
}
token.clone()
}
}
#[derive(Debug, Deserialize, Clone)]
pub struct AuthProviderConfig {
#[serde(rename = "access-token")] pub access_token: Option<String>,
#[serde(rename = "id-token")] pub id_token: Option<String>,
expiry: Option<String>,
#[serde(rename = "cmd-args")] cmd_args: Option<String>,
#[serde(rename = "cmd-path")] cmd_path: Option<String>,
#[serde(rename = "expiry-key")] expiry_key: Option<String>,
#[serde(rename = "token-key")] token_key: Option<String>,
}
| 35.060484 | 100 | 0.540196 |
d6677f65a8046d97fb07a01de269426ffdcb6255 | 19,513 | use crate::cx::*;
use makepad_microserde::*;
use std::any::TypeId;
use std::collections::{HashMap,BTreeSet};
#[derive(Clone, Debug, PartialEq, Default)]
pub struct KeyModifiers {
pub shift: bool,
pub control: bool,
pub alt: bool,
pub logo: bool
}
#[derive(Clone, Debug, PartialEq)]
pub enum FingerInputType{
Mouse,
Touch,
XR
}
pub const MOUSE_LEFT_BUTTON_DIGIT: usize = 0;
pub const MOUSE_RIGHT_BUTTON_DIGIT: usize = 1;
pub const MOUSE_MIDDLE_BUTTON_DIGIT: usize = 2;
impl FingerInputType{
pub fn is_touch(&self)->bool{*self == FingerInputType::Touch}
pub fn is_mouse(&self)->bool{*self == FingerInputType::Mouse}
pub fn is_xr(&self)->bool{*self == FingerInputType::XR}
pub fn has_hovers(&self)->bool{ *self == FingerInputType::Mouse || *self == FingerInputType::XR}
}
impl Default for FingerInputType{
fn default()->Self{Self::Mouse}
}
#[derive(Clone, Default, Debug, PartialEq)]
pub struct FingerDownEvent {
pub window_id: usize,
pub abs: Vec2,
pub rel: Vec2,
pub rect: Rect,
pub digit: usize,
pub tap_count: u32,
pub handled: bool,
pub input_type: FingerInputType,
pub modifiers: KeyModifiers,
pub time: f64
}
#[derive(Clone, Default, Debug, PartialEq)]
pub struct FingerMoveEvent {
pub window_id: usize,
pub abs: Vec2,
pub abs_start: Vec2,
pub rel: Vec2,
pub rel_start: Vec2,
pub rect: Rect,
pub is_over: bool,
pub digit: usize,
pub input_type: FingerInputType,
pub modifiers: KeyModifiers,
pub time: f64
}
impl FingerMoveEvent {
pub fn move_distance(&self) -> f32 {
((self.abs_start.x - self.abs.x).powf(2.) + (self.abs_start.y - self.abs.y).powf(2.)).sqrt()
}
}
#[derive(Clone, Default, Debug, PartialEq)]
pub struct FingerUpEvent {
pub window_id: usize,
pub abs: Vec2,
pub abs_start: Vec2,
pub rel: Vec2,
pub rel_start: Vec2,
pub rect: Rect,
pub digit: usize,
pub is_over: bool,
pub input_type: FingerInputType,
pub modifiers: KeyModifiers,
pub time: f64
}
#[derive(Clone, Debug, PartialEq)]
pub enum HoverState {
In,
Over,
Out
}
impl Default for HoverState {
fn default() -> HoverState {
HoverState::Over
}
}
#[derive(Clone, Default, Debug, PartialEq)]
pub struct FingerHoverEvent {
pub window_id: usize,
pub digit: usize,
pub abs: Vec2,
pub rel: Vec2,
pub rect: Rect,
pub any_down: bool,
pub handled: bool,
pub hover_state: HoverState,
pub modifiers: KeyModifiers,
pub time: f64
}
#[derive(Clone, Default, Debug, PartialEq)]
pub struct FingerScrollEvent {
pub window_id: usize,
pub digit: usize,
pub abs: Vec2,
pub rel: Vec2,
pub rect: Rect,
pub scroll: Vec2,
pub input_type: FingerInputType,
//pub is_wheel: bool,
pub handled_x: bool,
pub handled_y: bool,
pub modifiers: KeyModifiers,
pub time: f64
}
#[derive(Clone, Default, Debug, PartialEq)]
pub struct WindowGeomChangeEvent {
pub window_id: usize,
pub old_geom: WindowGeom,
pub new_geom: WindowGeom,
}
#[derive(Clone, Default, Debug, PartialEq)]
pub struct WindowMovedEvent {
pub window_id: usize,
pub old_pos: Vec2,
pub new_pos: Vec2,
}
#[derive(Clone, Default, Debug, PartialEq)]
pub struct AnimateEvent {
pub frame: u64,
pub time: f64
}
#[derive(Clone, Default, Debug, PartialEq)]
pub struct NextFrameEvent {
pub frame: u64,
pub time: f64
}
#[derive(Clone, Debug, PartialEq)]
pub struct FileReadEvent {
pub read_id: u64,
pub data: Result<Vec<u8>, String>
}
#[derive(Clone, Debug, PartialEq)]
pub struct TimerEvent {
pub timer_id: u64
}
#[derive(Clone, Debug, PartialEq)]
pub struct SignalEvent {
pub signals: HashMap<Signal, BTreeSet<StatusId>>
}
#[derive(Clone, Debug, PartialEq)]
pub struct TriggersEvent {
pub triggers: HashMap<Area, BTreeSet<TriggerId>>
}
#[derive(Clone, Debug, PartialEq)]
pub struct TriggerEvent {
pub triggers: BTreeSet<TriggerId>
}
#[derive(Clone, Debug, PartialEq)]
pub struct FileWriteEvent {
id: u64,
error: Option<String>
}
#[derive(Clone, Debug, PartialEq)]
pub struct LiveRecompileEvent {
pub changed_live_bodies: BTreeSet<LiveBodyId>,
pub errors: Vec<LiveBodyError>
}
#[derive(Clone, Debug, PartialEq)]
pub struct KeyEvent {
pub key_code: KeyCode,
//pub key_char: char,
pub is_repeat: bool,
pub modifiers: KeyModifiers,
pub time: f64
}
#[derive(Clone, Debug, PartialEq)]
pub struct KeyFocusEvent {
pub prev: Area,
pub focus: Area,
}
#[derive(Clone, Debug, PartialEq)]
pub struct TextInputEvent {
pub input: String,
pub replace_last: bool,
pub was_paste: bool
}
#[derive(Clone, Debug, PartialEq)]
pub struct TextCopyEvent {
pub response: Option<String>
}
#[derive(Clone, Debug, PartialEq)]
pub struct WindowCloseRequestedEvent {
pub window_id: usize,
pub accept_close: bool
}
#[derive(Clone, Debug, PartialEq)]
pub struct WindowClosedEvent {
pub window_id: usize
}
#[derive(Clone, Debug, PartialEq)]
pub struct WindowResizeLoopEvent {
pub was_started: bool,
pub window_id: usize
}
#[derive(Clone, Debug, PartialEq)]
pub enum WindowDragQueryResponse {
NoAnswer,
Client,
Caption,
SysMenu, // windows only
}
#[derive(Clone, Debug, PartialEq)]
pub struct WindowDragQueryEvent {
pub window_id: usize,
pub abs: Vec2,
pub response: WindowDragQueryResponse,
}
#[derive(Clone, Debug, Default, SerBin, DeBin, PartialEq)]
pub struct XRButton {
pub value:f32,
pub pressed:bool
}
#[derive(Clone, Debug, Default, SerBin, DeBin,PartialEq)]
pub struct XRInput {
pub active: bool,
pub grip: Transform,
pub ray: Transform,
pub num_buttons: usize,
pub buttons: [XRButton;8],
pub num_axes: usize,
pub axes: [f32;8],
}
#[derive(Clone, Debug, SerBin, DeBin, PartialEq)]
pub struct XRUpdateEvent {
// alright what data are we stuffing in
pub time: f64,
pub head_transform: Transform,
pub left_input: XRInput,
pub last_left_input: XRInput,
pub right_input: XRInput,
pub last_right_input: XRInput,
pub other_inputs: Vec<XRInput>
}
#[derive(Clone, Debug, PartialEq)]
pub struct WebSocketMessageEvent{
pub url: String,
pub result: Result<Vec<u8>, String>
}
#[derive(Clone, Debug, PartialEq)]
pub enum Event {
None,
Construct,
Destruct,
Draw,
Paint,
AppFocus,
AppFocusLost,
AnimEnded(AnimateEvent),
Animate(AnimateEvent),
NextFrame(NextFrameEvent),
XRUpdate(XRUpdateEvent),
WindowSetHoverCursor(MouseCursor),
WindowDragQuery(WindowDragQueryEvent),
WindowCloseRequested(WindowCloseRequestedEvent),
WindowClosed(WindowClosedEvent),
WindowGeomChange(WindowGeomChangeEvent),
WindowResizeLoop(WindowResizeLoopEvent),
FingerDown(FingerDownEvent),
FingerMove(FingerMoveEvent),
FingerHover(FingerHoverEvent),
FingerUp(FingerUpEvent),
FingerScroll(FingerScrollEvent),
FileRead(FileReadEvent),
FileWrite(FileWriteEvent),
Timer(TimerEvent),
Signal(SignalEvent),
Triggers(TriggersEvent),
Trigger(TriggerEvent),
Command(CommandId),
KeyFocus(KeyFocusEvent),
KeyFocusLost(KeyFocusEvent),
KeyDown(KeyEvent),
KeyUp(KeyEvent),
TextInput(TextInputEvent),
TextCopy(TextCopyEvent),
LiveRecompile(LiveRecompileEvent),
WebSocketMessage(WebSocketMessageEvent),
}
impl Default for Event {
fn default() -> Event {
Event::None
}
}
pub enum HitTouch {
Single,
Multi
}
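/// Options for `Event::hits`: `use_multi_touch` lets several fingers
/// capture the same area, and `margin` extends the hit rectangle.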
#[derive(Clone, Debug, Default)]
pub struct HitOpt {
pub use_multi_touch: bool,
pub margin: Option<Margin>,
}
impl Event {
pub fn is_next_frame(&self, cx:&mut Cx, next_frame: NextFrame)->Option<NextFrameEvent>{
match self {
Event::NextFrame(fe) => {
if cx._next_frames.contains(&next_frame){
return Some(fe.clone())
}
}
_=>()
}
None
}
pub fn is_animate(&self, cx:&mut Cx, animator: &Animator)->Option<AnimateEvent>{
match self {
Event::Animate(ae) => {
if cx.playing_animator_ids.get(&animator.animator_id).is_some(){
return Some(ae.clone())
}
}
_=>()
}
None
}
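    /// Hit-tests this event against `area`: translates absolute coordinates
    /// into area-relative ones, tracks finger capture and hover state in `cx`,
    /// and returns a localized copy of the event, or `Event::None` on a miss.
    ///
    /// Typical use (a sketch; the handler shape and field names are illustrative):
    ///
    ///     match event.hits(cx, self.area, HitOpt::default()) {
    ///         Event::FingerDown(fd) => { /* begin drag at fd.rel */ },
    ///         Event::FingerUp(fu) if fu.is_over => { /* treat as click */ },
    ///         _ => ()
    ///     }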
pub fn hits(&mut self, cx: &mut Cx, area: Area, opt: HitOpt) -> Event {
match self {
Event::KeyFocus(kf) => {
if area == kf.prev {
return Event::KeyFocusLost(kf.clone())
}
else if area == kf.focus {
return Event::KeyFocus(kf.clone())
}
},
Event::KeyDown(_) => {
if area == cx.key_focus {
return self.clone();
}
},
Event::KeyUp(_) => {
if area == cx.key_focus {
return self.clone();
}
},
Event::TextInput(_) => {
if area == cx.key_focus {
return self.clone();
}
},
Event::TextCopy(_) => {
if area == cx.key_focus {
return Event::TextCopy(
TextCopyEvent {response: None}
);
}
},
Event::Triggers(te) => {
if let Some(triggers) = te.triggers.get(&area).cloned(){
return Event::Trigger(TriggerEvent{triggers})
}
}
Event::FingerScroll(fe) => {
let rect = area.get_rect(&cx);
if rect.contains_with_margin(fe.abs, &opt.margin) {
//fe.handled = true;
return Event::FingerScroll(FingerScrollEvent {
rel: fe.abs - rect.pos,
rect: rect,
..fe.clone()
})
}
},
Event::FingerHover(fe) => {
let rect = area.get_rect(&cx);
if cx.fingers[fe.digit]._over_last == area {
let mut any_down = false;
for finger in &cx.fingers {
if finger.captured == area {
any_down = true;
break;
}
}
if !fe.handled && rect.contains_with_margin(fe.abs, &opt.margin) {
fe.handled = true;
if let HoverState::Out = fe.hover_state {
// cx.finger_over_last_area = Area::Empty;
}
else {
cx.fingers[fe.digit].over_last = area;
}
return Event::FingerHover(FingerHoverEvent {
rel: area.abs_to_rel(cx, fe.abs),
rect: rect,
any_down:any_down,
..fe.clone()
})
}
else {
//self.was_over_last_call = false;
return Event::FingerHover(FingerHoverEvent {
rel: area.abs_to_rel(cx, fe.abs),
rect: rect,
any_down:any_down,
hover_state: HoverState::Out,
..fe.clone()
})
}
}
else {
if !fe.handled && rect.contains_with_margin(fe.abs, &opt.margin) {
let mut any_down = false;
for finger in &cx.fingers {
if finger.captured == area {
any_down = true;
break;
}
}
cx.fingers[fe.digit].over_last = area;
fe.handled = true;
//self.was_over_last_call = true;
return Event::FingerHover(FingerHoverEvent {
rel: area.abs_to_rel(cx, fe.abs),
rect: rect,
any_down:any_down,
hover_state: HoverState::In,
..fe.clone()
})
}
}
},
Event::FingerMove(fe) => {
                // check whether our digit is captured, otherwise don't send
if cx.fingers[fe.digit].captured == area {
let abs_start = cx.fingers[fe.digit].down_abs_start;
let rel_start = cx.fingers[fe.digit].down_rel_start;
let rect = area.get_rect(&cx);
return Event::FingerMove(FingerMoveEvent {
abs_start: abs_start,
rel: area.abs_to_rel(cx, fe.abs),
rel_start: rel_start,
rect: rect,
is_over: rect.contains_with_margin(fe.abs, &opt.margin),
..fe.clone()
})
}
},
Event::FingerDown(fe) => {
if !fe.handled {
let rect = area.get_rect(&cx);
if rect.contains_with_margin(fe.abs, &opt.margin) {
// scan if any of the fingers already captured this area
if !opt.use_multi_touch {
for finger in &cx.fingers {
if finger.captured == area {
return Event::None;
}
}
}
cx.fingers[fe.digit].captured = area;
let rel = area.abs_to_rel(cx, fe.abs);
cx.fingers[fe.digit].down_abs_start = fe.abs;
cx.fingers[fe.digit].down_rel_start = rel;
fe.handled = true;
return Event::FingerDown(FingerDownEvent {
rel: rel,
rect: rect,
..fe.clone()
})
}
}
},
Event::FingerUp(fe) => {
if cx.fingers[fe.digit].captured == area {
cx.fingers[fe.digit].captured = Area::Empty;
let abs_start = cx.fingers[fe.digit].down_abs_start;
let rel_start = cx.fingers[fe.digit].down_rel_start;
let rect = area.get_rect(&cx);
return Event::FingerUp(FingerUpEvent {
is_over: rect.contains(fe.abs),
abs_start: abs_start,
rel_start: rel_start,
rel: area.abs_to_rel(cx, fe.abs),
rect: rect,
..fe.clone()
})
}
},
_ => ()
};
return Event::None;
}
}
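/// Application-level signal handle; `signal_id` 0 is reserved for the
/// empty signal (see `Signal::empty` / `Signal::is_empty`).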
#[derive(Hash, Eq, PartialEq, Clone, Copy, Debug, Default)]
pub struct Signal {
pub signal_id: usize
}
impl Signal {
pub fn empty() -> Signal {
Signal {
signal_id: 0
}
}
pub fn is_empty(&self) -> bool {
self.signal_id == 0
}
}
// Status
#[derive(PartialEq, Ord, PartialOrd, Copy, Clone, Hash, Eq, Debug)]
pub struct StatusId(pub TypeId);
impl Into<StatusId> for TypeId {
fn into(self) -> StatusId {StatusId(self)}
}
#[derive(PartialEq, Ord, PartialOrd, Copy, Clone, Hash, Eq, Debug)]
pub struct TriggerId(pub TypeId);
impl Into<TriggerId> for TypeId {
fn into(self) -> TriggerId {TriggerId(self)}
}
#[derive(Clone, Debug, Default)]
pub struct FileRead {
pub path: String,
pub read_id: u64
}
impl FileRead {
pub fn is_pending(&self) -> bool {
self.read_id != 0
}
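    /// If `fr` answers this pending read, clears the pending state and returns
    /// the file contents decoded as UTF-8 (or an error); returns `None` for
    /// unrelated reads.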
pub fn resolve_utf8<'a>(&mut self, fr: &'a FileReadEvent) -> Option<Result<&'a str,String>> {
if fr.read_id == self.read_id {
self.read_id = 0;
if let Ok(str_data) = &fr.data {
if let Ok(utf8_string) = std::str::from_utf8(&str_data) {
return Some(Ok(utf8_string))
}
else {
return Some(Err(format!("can't parse file as utf8 {}", self.path)))
}
}
else if let Err(err) = &fr.data {
return Some(Err(format!("can't load file as utf8 {} {}", self.path, err)))
}
}
return None
}
}
#[derive(Clone, Debug, Default)]
pub struct Timer {
pub timer_id: u64
}
impl Timer {
pub fn empty() -> Timer {
Timer {
timer_id: 0,
}
}
pub fn is_empty(&self) -> bool {
self.timer_id == 0
}
pub fn is_timer(&mut self, te: &TimerEvent) -> bool {
te.timer_id == self.timer_id
}
}
impl Event {
pub fn set_handled(&mut self, set: bool) {
match self {
Event::FingerHover(fe) => {
fe.handled = set;
},
Event::FingerDown(fe) => {
fe.handled = set;
},
_ => ()
}
}
pub fn handled(&self) -> bool {
match self {
Event::FingerHover(fe) => {
fe.handled
},
Event::FingerDown(fe) => {
fe.handled
},
_ => false
}
}
}
// lowest common denominator keymap between desktop and web
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum KeyCode {
Escape,
Backtick,
Key0,
Key1,
Key2,
Key3,
Key4,
Key5,
Key6,
Key7,
Key8,
Key9,
Minus,
Equals,
Backspace,
Tab,
KeyQ,
KeyW,
KeyE,
KeyR,
KeyT,
KeyY,
KeyU,
KeyI,
KeyO,
KeyP,
LBracket,
RBracket,
Return,
KeyA,
KeyS,
KeyD,
KeyF,
KeyG,
KeyH,
KeyJ,
KeyK,
KeyL,
Semicolon,
Quote,
Backslash,
KeyZ,
KeyX,
KeyC,
KeyV,
KeyB,
KeyN,
KeyM,
Comma,
Period,
Slash,
Control,
Alt,
Shift,
Logo,
//RightControl,
//RightShift,
//RightAlt,
//RightLogo,
Space,
Capslock,
F1,
F2,
F3,
F4,
F5,
F6,
F7,
F8,
F9,
F10,
F11,
F12,
PrintScreen,
Scrolllock,
Pause,
Insert,
Delete,
Home,
End,
PageUp,
PageDown,
Numpad0,
Numpad1,
Numpad2,
Numpad3,
Numpad4,
Numpad5,
Numpad6,
Numpad7,
Numpad8,
Numpad9,
NumpadEquals,
NumpadSubtract,
NumpadAdd,
NumpadDecimal,
NumpadMultiply,
NumpadDivide,
Numlock,
NumpadEnter,
ArrowUp,
ArrowDown,
ArrowLeft,
ArrowRight,
Unknown
}
impl Default for KeyCode{
fn default()->Self{KeyCode::Unknown}
} | 24.857325 | 100 | 0.518065 |
e87e02040b6ae1a81f6388d10895853fb704de79 | 5,315 | use libc::*;
use crate::*;
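// EVP_PKEY_CTX_ctrl() command numbers, offset from EVP_PKEY_ALG_CTRL to
// match the values in the OpenSSL headers.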
const EVP_PKEY_CTRL_TLS_MD: c_int = EVP_PKEY_ALG_CTRL;
const EVP_PKEY_CTRL_TLS_SECRET: c_int = EVP_PKEY_ALG_CTRL + 1;
const EVP_PKEY_CTRL_TLS_SEED: c_int = EVP_PKEY_ALG_CTRL + 2;
const EVP_PKEY_CTRL_HKDF_MD: c_int = EVP_PKEY_ALG_CTRL + 3;
const EVP_PKEY_CTRL_HKDF_SALT: c_int = EVP_PKEY_ALG_CTRL + 4;
const EVP_PKEY_CTRL_HKDF_KEY: c_int = EVP_PKEY_ALG_CTRL + 5;
const EVP_PKEY_CTRL_HKDF_INFO: c_int = EVP_PKEY_ALG_CTRL + 6;
const EVP_PKEY_CTRL_HKDF_MODE: c_int = EVP_PKEY_ALG_CTRL + 7;
const EVP_PKEY_CTRL_PASS: c_int = EVP_PKEY_ALG_CTRL + 8;
const EVP_PKEY_CTRL_SCRYPT_SALT: c_int = EVP_PKEY_ALG_CTRL + 9;
const EVP_PKEY_CTRL_SCRYPT_N: c_int = EVP_PKEY_ALG_CTRL + 10;
const EVP_PKEY_CTRL_SCRYPT_R: c_int = EVP_PKEY_ALG_CTRL + 11;
const EVP_PKEY_CTRL_SCRYPT_P: c_int = EVP_PKEY_ALG_CTRL + 12;
const EVP_PKEY_CTRL_SCRYPT_MAXMEM_BYTES: c_int = EVP_PKEY_ALG_CTRL + 13;
// HKDF modes; public so callers can pass them to EVP_PKEY_CTX_hkdf_mode().
pub const EVP_PKEY_HKDEF_MODE_EXTRACT_AND_EXPAND: c_int = 0;
pub const EVP_PKEY_HKDEF_MODE_EXTRACT_ONLY: c_int = 1;
pub const EVP_PKEY_HKDEF_MODE_EXPAND_ONLY: c_int = 2;
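// TLS 1.x PRF helpers: set the digest and the secret once, then add seed
// material (possibly repeatedly) before deriving.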
pub unsafe extern "C" fn EVP_PKEY_CTX_set_tls1_prf_md(
pctx: *mut crate::EVP_PKEY_CTX,
md: *const crate::EVP_MD,
) -> c_int {
crate::EVP_PKEY_CTX_ctrl(
pctx,
-1,
crate::EVP_PKEY_OP_DERIVE,
EVP_PKEY_CTRL_TLS_MD,
0,
md as *mut c_void,
)
}
pub unsafe extern "C" fn EVP_PKEY_CTX_set1_tls1_prf_secret(
pctx: *mut crate::EVP_PKEY_CTX,
sec: *mut c_uchar,
seclen: c_int,
) -> c_int {
crate::EVP_PKEY_CTX_ctrl(
pctx,
-1,
crate::EVP_PKEY_OP_DERIVE,
EVP_PKEY_CTRL_TLS_SECRET,
seclen,
sec as *mut c_void,
)
}
pub unsafe extern "C" fn EVP_PKEY_CTX_add1_tls1_prf_seed(
pctx: *mut crate::EVP_PKEY_CTX,
seed: *mut c_uchar,
seedlen: c_int,
) -> c_int {
crate::EVP_PKEY_CTX_ctrl(
pctx,
-1,
crate::EVP_PKEY_OP_DERIVE,
EVP_PKEY_CTRL_TLS_SEED,
seedlen,
seed as *mut c_void,
)
}
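// HKDF (RFC 5869) setup helpers. A minimal derivation sketch, assuming
// EVP_PKEY_HKDF, EVP_PKEY_CTX_new_id, EVP_PKEY_derive_init, EVP_PKEY_derive
// and EVP_sha256 are also exposed by this crate:
//
//     let ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_HKDF, ptr::null_mut());
//     EVP_PKEY_derive_init(ctx);
//     EVP_PKEY_CTX_set_hkdf_md(ctx, EVP_sha256());
//     EVP_PKEY_CTX_set1_hkdf_salt(ctx, salt.as_mut_ptr(), salt.len() as c_int);
//     EVP_PKEY_CTX_set1_hkdf_key(ctx, ikm.as_mut_ptr(), ikm.len() as c_int);
//     EVP_PKEY_CTX_add1_hkdf_info(ctx, info.as_mut_ptr(), info.len() as c_int);
//     EVP_PKEY_derive(ctx, okm.as_mut_ptr(), &mut okm_len);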
pub unsafe extern "C" fn EVP_PKEY_CTX_set_hkdf_md(
pctx: *mut crate::EVP_PKEY_CTX,
md: *const crate::EVP_MD,
) -> c_int {
crate::EVP_PKEY_CTX_ctrl(
pctx,
-1,
crate::EVP_PKEY_OP_DERIVE,
EVP_PKEY_CTRL_HKDF_MD,
0,
md as *mut c_void,
)
}
pub unsafe extern "C" fn EVP_PKEY_CTX_set1_hkdf_salt(
pctx: *mut crate::EVP_PKEY_CTX,
salt: *mut c_uchar,
saltlen: c_int,
) -> c_int {
crate::EVP_PKEY_CTX_ctrl(
pctx,
-1,
crate::EVP_PKEY_OP_DERIVE,
EVP_PKEY_CTRL_HKDF_SALT,
saltlen,
salt as *mut c_void,
)
}
pub unsafe extern "C" fn EVP_PKEY_CTX_set1_hkdf_key(
pctx: *mut crate::EVP_PKEY_CTX,
key: *mut c_uchar,
keylen: c_int,
) -> c_int {
crate::EVP_PKEY_CTX_ctrl(
pctx,
-1,
crate::EVP_PKEY_OP_DERIVE,
EVP_PKEY_CTRL_HKDF_KEY,
keylen,
key as *mut c_void,
)
}
pub unsafe extern "C" fn EVP_PKEY_CTX_add1_hkdf_info(
pctx: *mut crate::EVP_PKEY_CTX,
info: *mut c_uchar,
infolen: c_int,
) -> c_int {
crate::EVP_PKEY_CTX_ctrl(
pctx,
-1,
crate::EVP_PKEY_OP_DERIVE,
EVP_PKEY_CTRL_HKDF_INFO,
infolen,
info as *mut c_void,
)
}
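// Selects extract-and-expand, extract-only, or expand-only operation;
// pass one of the EVP_PKEY_HKDEF_MODE_* constants defined above.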
pub unsafe extern "C" fn EVP_PKEY_CTX_hkdf_mode(
pctx: *mut crate::EVP_PKEY_CTX,
mode: c_int,
) -> c_int {
crate::EVP_PKEY_CTX_ctrl(
pctx,
-1,
crate::EVP_PKEY_OP_DERIVE,
EVP_PKEY_CTRL_HKDF_MODE,
mode,
std::ptr::null_mut(),
)
}
pub unsafe extern "C" fn EVP_PKEY_CTX_set1_pbe_pass(
pctx: *mut crate::EVP_PKEY_CTX,
pass: *mut c_uchar,
passlen: c_int,
) -> c_int {
crate::EVP_PKEY_CTX_ctrl(
pctx,
-1,
crate::EVP_PKEY_OP_DERIVE,
EVP_PKEY_CTRL_PASS,
passlen,
pass as *mut c_void,
)
}
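// scrypt (RFC 7914) KDF parameters: N is the CPU/memory cost (a power of
// two), r the block size, p the parallelization factor, and maxmem_bytes
// caps the memory the derivation may use.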
pub unsafe extern "C" fn EVP_PKEY_CTX_set1_scrypt_salt(
pctx: *mut crate::EVP_PKEY_CTX,
salt: *mut c_uchar,
saltlen: c_int,
) -> c_int {
crate::EVP_PKEY_CTX_ctrl(
pctx,
-1,
crate::EVP_PKEY_OP_DERIVE,
EVP_PKEY_CTRL_SCRYPT_SALT,
saltlen,
salt as *mut c_void,
)
}
pub unsafe extern "C" fn EVP_PKEY_CTX_set_scrypt_N(
pctx: *mut crate::EVP_PKEY_CTX,
n: u64,
) -> c_int {
crate::EVP_PKEY_CTX_ctrl_uint64(
pctx,
-1,
crate::EVP_PKEY_OP_DERIVE,
EVP_PKEY_CTRL_SCRYPT_N,
n,
)
}
pub unsafe extern "C" fn EVP_PKEY_CTX_set_scrypt_r(
pctx: *mut crate::EVP_PKEY_CTX,
r: u64,
) -> c_int {
crate::EVP_PKEY_CTX_ctrl_uint64(
pctx,
-1,
crate::EVP_PKEY_OP_DERIVE,
EVP_PKEY_CTRL_SCRYPT_R,
r,
)
}
pub unsafe extern "C" fn EVP_PKEY_CTX_set_scrypt_p(
pctx: *mut crate::EVP_PKEY_CTX,
p: u64,
) -> c_int {
crate::EVP_PKEY_CTX_ctrl_uint64(
pctx,
-1,
crate::EVP_PKEY_OP_DERIVE,
EVP_PKEY_CTRL_SCRYPT_P,
p,
)
}
pub unsafe extern "C" fn EVP_PKEY_CTX_set_scrypt_maxmem_bytes(
pctx: *mut crate::EVP_PKEY_CTX,
maxmem_bytes: u64,
) -> c_int {
crate::EVP_PKEY_CTX_ctrl_uint64(
pctx,
-1,
crate::EVP_PKEY_OP_DERIVE,
EVP_PKEY_CTRL_SCRYPT_MAXMEM_BYTES,
maxmem_bytes,
)
}
| 24.049774 | 72 | 0.64158 |