file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
implementation.rs
|
CODE_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the loader code region");
let kernel = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::KERNEL_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::KERNEL_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the kernel region");
let sensitive_data = xous::syscall::map_memory(
None,
None,
0x1000,
xous::MemoryFlags::R | xous::MemoryFlags::W,
).expect("couldn't map sensitive data page");
let pass_cache = xous::syscall::map_memory(
None,
None,
0x1000,
xous::MemoryFlags::R | xous::MemoryFlags::W,
).expect("couldn't map sensitive data page");
let spinor = spinor::Spinor::new(&xns).expect("couldn't connect to spinor server");
spinor.register_soc_token().expect("couldn't register rootkeys as the one authorized writer to the gateware update area!");
let keys = RootKeys {
keyrom: CSR::new(keyrom.as_mut_ptr() as *mut u32),
gateware,
staging,
loader_code,
kernel,
sensitive_data,
pass_cache,
update_password_policy: PasswordRetentionPolicy::AlwaysPurge,
boot_password_policy: PasswordRetentionPolicy::AlwaysKeep,
cur_password_type: None,
susres: susres::Susres::new_without_hook(&xns).expect("couldn't connect to susres without hook"),
trng: trng::Trng::new(&xns).expect("couldn't connect to TRNG server"),
gam: gam::Gam::new(&xns).expect("couldn't connect to GAM"),
gfx: graphics_server::Gfx::new(&xns).expect("couldn't connect to gfx"),
spinor
};
keys
}
/// Zeroizes the cached bcrypt-derived hash for the given password type and
/// clears its validity flag in the shared `PasswordCache` page.
/// The code is left in this exact form deliberately: zeroing of secrets must
/// not be restructured in ways that let the optimizer elide the stores.
fn purge_password(&mut self, pw_type: PasswordType) {
// SAFETY: pass_cache is a 0x1000-byte page mapped exclusively by this object
// (see the map_memory call in new()); writing PasswordCache fields through the
// raw pointer is sound provided PasswordCache fits in that page -- TODO confirm size.
unsafe {
let pcache_ptr: *mut PasswordCache = self.pass_cache.as_mut_ptr() as *mut PasswordCache;
match pw_type {
PasswordType::Boot => {
// overwrite every word of the hashed boot password
for p in (*pcache_ptr).hashed_boot_pw.iter_mut() {
*p = 0;
}
(*pcache_ptr).hashed_boot_pw_valid = 0;
}
PasswordType::Update => {
// overwrite every word of the hashed update password
for p in (*pcache_ptr).hashed_update_pw.iter_mut() {
*p = 0;
}
(*pcache_ptr).hashed_update_pw_valid = 0;
}
}
}
// fence prevents the compiler from reordering or eliding the zeroing writes above
core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
/// Overwrites the entire sensitive-data staging page (the RAM copy of KEYROM
/// contents) with zeros. Kept in this exact form: secret-zeroing loops must not
/// be restructured in ways that let the optimizer remove the stores.
fn purge_sensitive_data(&mut self) {
let data = self.sensitive_data.as_slice_mut::<u32>();
for d in data.iter_mut() {
*d = 0;
}
// fence keeps the optimizer from discarding the "dead" zeroing stores
core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
/// Suspend hook: purges any cached password whose retention policy is not
/// `AlwaysKeep`, then wipes the sensitive-data staging page unconditionally.
pub fn suspend(&mut self) {
    if !matches!(self.boot_password_policy, PasswordRetentionPolicy::AlwaysKeep) {
        self.purge_password(PasswordType::Boot);
    }
    if !matches!(self.update_password_policy, PasswordRetentionPolicy::AlwaysKeep) {
        self.purge_password(PasswordType::Update);
    }
    self.purge_sensitive_data();
}
/// Resume hook: intentionally a no-op. State purged by `suspend` stays purged.
pub fn resume(&mut self) {
}
/// Applies a password-retention policy chosen by the UX layer to the password
/// type currently being serviced (`cur_password_type`), then clears the
/// in-progress type.
///
/// * `policy` - the chosen policy; `None` means "no explicit choice", which is
///   treated conservatively as `AlwaysPurge`.
///
/// BUG FIX: the original `None` arm only *evaluated* `AlwaysPurge` inside a
/// `match` expression and discarded the result, so the stored policy was never
/// changed; it now actually assigns the fallback policy.
pub fn update_policy(&mut self, policy: Option<PasswordRetentionPolicy>) {
    let pw_type = if let Some(cur_type) = self.cur_password_type {
        cur_type
    } else {
        log::error!("got an unexpected policy update from the UX");
        return;
    };
    // default to the most conservative policy when the UX did not pick one
    let p = policy.unwrap_or(PasswordRetentionPolicy::AlwaysPurge);
    match pw_type {
        PasswordType::Boot => self.boot_password_policy = p,
        PasswordType::Update => self.update_password_policy = p,
    }
    // once the policy has been set, revert the current type to None
    self.cur_password_type = None;
}
/// Plaintext password is passed as a &str. Any copies internally are destroyed. Caller is responsible for destroying the &str original.
/// Performs a bcrypt hash of the password, with the currently set salt; does not store the plaintext after exit.
///
/// The 24-byte bcrypt output is expanded to 32 bytes via SHA-512/256 so it can
/// be used directly as XOR key material against 256-bit keys, then stored in the
/// `PasswordCache` slot selected by `cur_password_type`, with its valid flag set.
pub fn hash_and_save_password(&mut self, pw: &str) {
let pw_type = if let Some(cur_type) = self.cur_password_type {
cur_type
} else {
// no password request is in progress -- nothing sensible to do with `pw`
log::error!("got an unexpected password from the UX");
return;
};
let mut hashed_password: [u8; 24] = [0; 24];
let mut salt = self.get_salt();
// we change the salt ever-so-slightly for every password. This doesn't make any one password more secure;
// but it disallows guessing all the passwords with a single off-the-shelf hashcat run.
salt[0] ^= pw_type as u8;
let timer = ticktimer_server::Ticktimer::new().expect("couldn't connect to ticktimer");
// the bcrypt function takes the plaintext password and makes one copy to prime the blowfish bcrypt
// cipher. It is responsible for erasing this state.
let start_time = timer.elapsed_ms();
bcrypt(BCRYPT_COST, &salt, pw, &mut hashed_password); // note: this internally makes a copy of the password, and destroys it
let elapsed = timer.elapsed_ms() - start_time;
log::info!("bcrypt cost: {} time: {}ms", BCRYPT_COST, elapsed); // benchmark to figure out how to set cost parameter
// expand the 24-byte (192-bit) bcrypt result into 256 bits, so we can use it directly as XOR key material
// against 256-bit AES and curve25519 keys
// for such a small hash, software is the most performant choice
let mut hasher = engine_sha512::Sha512Trunc256::new(Some(engine_sha512::FallbackStrategy::SoftwareOnly));
hasher.update(hashed_password);
let digest = hasher.finalize();
let pcache_ptr: *mut PasswordCache = self.pass_cache.as_mut_ptr() as *mut PasswordCache;
// SAFETY: pass_cache is a page mapped exclusively by this object; writing the
// PasswordCache fields through the raw pointer is sound provided the struct
// fits within the page -- TODO confirm size.
unsafe {
match pw_type {
PasswordType::Boot => {
for (&src, dst) in digest.iter().zip((*pcache_ptr).hashed_boot_pw.iter_mut()) {
*dst = src;
}
(*pcache_ptr).hashed_boot_pw_valid = 1;
}
PasswordType::Update => {
for (&src, dst) in digest.iter().zip((*pcache_ptr).hashed_update_pw.iter_mut()) {
*dst = src;
}
(*pcache_ptr).hashed_update_pw_valid = 1;
}
}
}
}
/// Reads a 256-bit key out of the KEYROM, starting at word offset `index`.
/// Each 32-bit KEYROM word is serialized big-endian into the output array.
fn read_key_256(&mut self, index: u8) -> [u8; 32] {
    let mut key = [0u8; 32];
    for (offset, chunk) in key.chunks_mut(4).enumerate() {
        // select the word, then latch it out of the ROM's data register
        self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, index as u32 + offset as u32);
        let word = self.keyrom.rf(utra::keyrom::DATA_DATA);
        chunk.copy_from_slice(&word.to_be_bytes());
    }
    key
}
/// Reads a 128-bit key out of the KEYROM, starting at word offset `index`.
/// Each 32-bit KEYROM word is serialized big-endian into the output array.
fn read_key_128(&mut self, index: u8) -> [u8; 16] {
    let mut key = [0u8; 16];
    for (offset, chunk) in key.chunks_mut(4).enumerate() {
        // select the word, then latch it out of the ROM's data register
        self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, index as u32 + offset as u32);
        let word = self.keyrom.rf(utra::keyrom::DATA_DATA);
        chunk.copy_from_slice(&word.to_be_bytes());
    }
    key
}
/// Returns the `salt` needed for the `bcrypt` routine.
/// This routine handles the special-case of being uninitialized: in that case, we need to get
/// salt from a staging area, and not our KEYROM. However, `setup_key_init` must be called
/// first to ensure that the staging area has a valid salt.
fn get_salt(&mut self) -> [u8; 16] {
    if self.is_initialized() {
        // normal path: the pepper lives in the KEYROM proper
        self.read_key_128(KeyRomLocs::PEPPER)
    } else {
        // not yet initialized: pull the pepper out of the RAM staging area,
        // serializing each staged u32 big-endian (matching read_key_128)
        let sensitive_slice = self.sensitive_data.as_slice::<u32>();
        let pepper_words = KeyRomLocs::PEPPER as usize..KeyRomLocs::PEPPER as usize + 128 / (size_of::<u32>() * 8);
        let mut salt = [0u8; 16];
        for (chunk, &word) in salt.chunks_mut(4).zip(sensitive_slice[pepper_words].iter()) {
            chunk.copy_from_slice(&word.to_be_bytes());
        }
        salt
    }
}
/// Called by the UX layer to track which password we're currently requesting.
/// Passing `None` marks that no password request is in progress.
pub fn set_ux_password_type(&mut self, cur_type: Option<PasswordType>) {
self.cur_password_type = cur_type;
}
/// Called by the UX layer to check which password request is in progress.
/// Returns the type last set via `set_ux_password_type`, or `None` when idle.
pub fn get_ux_password_type(&self) -> Option<PasswordType> {self.cur_password_type}
/// Returns `true` if the KEYROM's CONFIG word has its INITIALIZED bit set,
/// i.e. a key-init run has completed and been committed to the ROM.
/// Takes `&mut self` because selecting the CONFIG word writes the ROM's
/// address register.
pub fn is_initialized(&mut self) -> bool {
    self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, KeyRomLocs::CONFIG as u32);
    let config = self.keyrom.rf(utra::keyrom::DATA_DATA);
    // idiom fix: return the comparison directly instead of `if c { true } else { false }`
    config & keyrom_config::INITIALIZED.ms(1) != 0
}
/// Called by the UX layer to set up a key init run. It disables suspend/resume for the duration
/// of the run, and also sets up some missing fields of KEYROM necessary to encrypt passwords.
///
/// Specifically: copies all 256 KEYROM words into the sensitive-data staging
/// page, then overwrites the staged PEPPER slot (128 bits = four u32 words)
/// with fresh TRNG output.
pub fn setup_key_init(&mut self) {
// block suspend/resume ops during security-sensitive operations
self.susres.set_suspendable(false).expect("couldn't block suspend/resume");
// in this block, keyrom data is copied into RAM.
// make a copy of the KEYROM to hold the new mods, in the sensitive data area
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for addr in 0..256 {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, addr);
sensitive_slice[addr as usize] = self.keyrom.rf(utra::keyrom::DATA_DATA);
}
// provision the pepper; this replaces whatever was copied from the ROM's pepper slot
for keyword in sensitive_slice[KeyRomLocs::PEPPER as usize..KeyRomLocs::PEPPER as usize + 128/(size_of::<u32>()*8)].iter_mut() {
*keyword = self.trng.get_u32().expect("couldn't get random number");
}
}
/// Core of the key initialization routine. Requires a `progress_modal` dialog box that has been set
/// up with the appropriate notification messages by the UX layer, and a `Slider` type action which
/// is used to report the progress of the initialization routine. We assume the `Slider` box is set
/// up to report progress on a range of 0-100%.
///
/// This routine dispatches the following activities:
/// - generate signing private key (encrypted with update password)
/// - generate rootkey (encrypted with boot password)
/// - generate signing public key
/// - set the init bit
/// - sign the loader
/// - sign the kernel
/// - compute the patch set for the FPGA bitstream
/// - do the patch (whatever that means - gotta deal with the AES key, HMAC etc.)
/// - verify the FPGA image hmac
/// - sign the FPGA image
/// - get ready for a reboot
/// - returns true if we should reboot (everything succeeded)
/// - returns false if we had an error condition (don't reboot)
pub fn do_key_init(&mut self, progress_modal: &mut Modal, progress_action: &mut Slider) -> bool {
// kick the progress bar to indicate we've entered the routine
update_progress(1, progress_modal, progress_action);
let keypair: Keypair = Keypair::generate(&mut self.trng);
// pub key is easy, no need to encrypt
let public_key: [u8; ed25519_dalek::PUBLIC_KEY_LENGTH] = keypair.public.to_bytes();
{ // scope sensitive_slice narrowly, as it borrows *self mutably, and can mess up later calls that borrow an immutable self
// sensitive_slice is our staging area for the new keyrom contents
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in public_key.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::SELFSIGN_PUBKEY as usize..KeyRomLocs::SELFSIGN_PUBKEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
// extract the update password key from the cache, and apply it to the private key
let pcache: &PasswordCache = unsafe{&*(self.pass_cache.as_mut_ptr() as *mut PasswordCache)};
#[cfg(feature = "hazardous-debug")]
{
log::info!("cached boot passwords {:x?}", pcache.hashed_boot_pw);
log::info!("cached update password: {:x?}", pcache.hashed_update_pw);
}
// private key must XOR with password before storing
let mut private_key_enc: [u8; ed25519_dalek::SECRET_KEY_LENGTH] = [0; ed25519_dalek::SECRET_KEY_LENGTH];
// we do this from to try and avoid making as few copies of the hashed password as possible
for (dst, (plain, key)) in
private_key_enc.iter_mut()
.zip(keypair.secret.to_bytes().iter()
.zip(pcache.hashed_update_pw.iter())) {
*dst = plain ^ key;
}
// store the private key to the keyrom staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in private_key_enc.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::SELFSIGN_PRIVKEY as usize..KeyRomLocs::SELFSIGN_PRIVKEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
update_progress(10, progress_modal, progress_action);
// generate and store a root key (aka boot key), this is what is unlocked by the "boot password"
// ironically, it's a "lower security" key because it just acts as a gatekeeper to further
// keys that would have a stronger password applied to them, based upon the importance of the secret
// think of this more as a user PIN login confirmation, than as a significant cryptographic event
let mut boot_key_enc: [u8; 32] = [0; 32];
for (dst, key) in
boot_key_enc.chunks_mut(4).into_iter()
.zip(pcache.hashed_boot_pw.chunks(4).into_iter()) {
let key_word = self.trng.get_u32().unwrap().to_be_bytes();
// just unroll this loop, it's fast and easy enough
(*dst)[0] = key[0] ^ key_word[0];
(*dst)[1] = key[1] ^ key_word[1];
(*dst)[2] = key[2] ^ key_word[2];
(*dst)[3] = key[3] ^ key_word[3];
// also note that interestingly, we don't have to XOR it with the hashed boot password --
// this key isn't used by this routine, just initialized, so really, it only matters to
// XOR it with the password when you use it the first time to encrypt something.
}
// store the boot key to the keyrom staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in private_key_enc.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::USER_KEY as usize..KeyRomLocs::USER_KEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
update_progress(20, progress_modal, progress_action);
// sign the loader
let (loader_sig, loader_len) = self.sign_loader(&keypair);
update_progress(30, progress_modal, progress_action);
// sign the kernel
let (kernel_sig, kernel_len) = self.sign_kernel(&keypair);
update_progress(40, progress_modal, progress_action);
// encrypt the FPGA key using the update password. in an un-init system, it is provided to us in plaintext format
// e.g. in the case that we're doing a BBRAM boot (eFuse flow would give us a 0's key and we'd later on set it)
#[cfg(feature = "hazardous-debug")]
self.debug_print_key(KeyRomLocs::FPGA_KEY as usize, 256, "FPGA key before encryption: ");
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (word, key_word) in sensitive_slice[KeyRomLocs::FPGA_KEY as usize..KeyRomLocs::FPGA_KEY as usize + 256/(size_of::<u32>()*8)].iter_mut()
.zip(pcache.hashed_update_pw.chunks(4).into_iter()) {
*word = *word ^ u32::from_be_bytes(key_word.try_into().unwrap());
}
}
update_progress(50, progress_modal, progress_action);
// set the "init" bit in the staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, KeyRomLocs::CONFIG as u32);
let config = self.keyrom.rf(utra::keyrom::DATA_DATA);
sensitive_slice[KeyRomLocs::CONFIG as usize] |= keyrom_config::INITIALIZED.ms(1);
}
update_progress(60, progress_modal, progress_action);
#[cfg(feature = "hazardous-debug")]
{
log::info!("Self private key: {:x?}", keypair.secret.to_bytes());
log::info!("Self public key: {:x?}", keypair.public.to_bytes());
self.debug_staging();
}
// Because we're initializing keys for the *first* time, make a backup copy of the bitstream to
// the staging area. Note that if we're doing an update, the update target would already be
// in the staging area, so this step should be skipped.
self.make_gateware_backup(60, 70, progress_modal, progress_action);
// compute the keyrom patch set for the bitstream
// at this point the KEYROM as replicated in sensitive_slice should have all its assets in place
patch::should_patch(42);
// finalize the progress bar on exit -- always leave at 100%
update_progress(100, progress_modal, progress_action);
true
}
/// Backs up the main gateware region to the staging area, reporting progress
/// over the [prog_start, prog_end] slider range.
/// NOTE(review): currently an unimplemented stub -- all parameters are unused
/// and callers (do_key_init) get no actual backup. Confirm before relying on it.
fn make_gateware_backup(&mut self, prog_start: u32, prog_end: u32, progress_modal: &mut Modal, progress_action: &mut Slider) {
// println!("this is a test of stdlib");
}
#[cfg(feature = "hazardous-debug")]
fn
|
debug_staging
|
identifier_name
|
|
implementation.rs
|
core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
fn purge_sensitive_data(&mut self) {
let data = self.sensitive_data.as_slice_mut::<u32>();
for d in data.iter_mut() {
*d = 0;
}
core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
pub fn suspend(&mut self) {
match self.boot_password_policy {
PasswordRetentionPolicy::AlwaysKeep => {
()
},
_ => {
self.purge_password(PasswordType::Boot);
}
}
match self.update_password_policy {
PasswordRetentionPolicy::AlwaysKeep => {
()
},
_ => {
self.purge_password(PasswordType::Update);
}
}
self.purge_sensitive_data();
}
pub fn resume(&mut self) {
}
/// Applies a password-retention policy chosen by the UX layer to the password
/// type currently being serviced (`cur_password_type`), then clears the
/// in-progress type.
///
/// * `policy` - the chosen policy; `None` means "no explicit choice", which is
///   treated conservatively as `AlwaysPurge`.
///
/// BUG FIX: the original `None` arm only *evaluated* `AlwaysPurge` inside a
/// `match` expression and discarded the result, so the stored policy was never
/// changed; it now actually assigns the fallback policy.
pub fn update_policy(&mut self, policy: Option<PasswordRetentionPolicy>) {
    let pw_type = if let Some(cur_type) = self.cur_password_type {
        cur_type
    } else {
        log::error!("got an unexpected policy update from the UX");
        return;
    };
    // default to the most conservative policy when the UX did not pick one
    let p = policy.unwrap_or(PasswordRetentionPolicy::AlwaysPurge);
    match pw_type {
        PasswordType::Boot => self.boot_password_policy = p,
        PasswordType::Update => self.update_password_policy = p,
    }
    // once the policy has been set, revert the current type to None
    self.cur_password_type = None;
}
/// Plaintext password is passed as a &str. Any copies internally are destroyed. Caller is responsible for destroying the &str original.
/// Performs a bcrypt hash of the password, with the currently set salt; does not store the plaintext after exit.
pub fn hash_and_save_password(&mut self, pw: &str) {
let pw_type = if let Some(cur_type) = self.cur_password_type {
cur_type
} else {
log::error!("got an unexpected password from the UX");
return;
};
let mut hashed_password: [u8; 24] = [0; 24];
let mut salt = self.get_salt();
// we change the salt ever-so-slightly for every password. This doesn't make any one password more secure;
// but it disallows guessing all the passwords with a single off-the-shelf hashcat run.
salt[0] ^= pw_type as u8;
let timer = ticktimer_server::Ticktimer::new().expect("couldn't connect to ticktimer");
// the bcrypt function takes the plaintext password and makes one copy to prime the blowfish bcrypt
// cipher. It is responsible for erasing this state.
let start_time = timer.elapsed_ms();
bcrypt(BCRYPT_COST, &salt, pw, &mut hashed_password); // note: this internally makes a copy of the password, and destroys it
let elapsed = timer.elapsed_ms() - start_time;
log::info!("bcrypt cost: {} time: {}ms", BCRYPT_COST, elapsed); // benchmark to figure out how to set cost parameter
// expand the 24-byte (192-bit) bcrypt result into 256 bits, so we can use it directly as XOR key material
// against 256-bit AES and curve25519 keys
// for such a small hash, software is the most performant choice
let mut hasher = engine_sha512::Sha512Trunc256::new(Some(engine_sha512::FallbackStrategy::SoftwareOnly));
hasher.update(hashed_password);
let digest = hasher.finalize();
let pcache_ptr: *mut PasswordCache = self.pass_cache.as_mut_ptr() as *mut PasswordCache;
unsafe {
match pw_type {
PasswordType::Boot => {
for (&src, dst) in digest.iter().zip((*pcache_ptr).hashed_boot_pw.iter_mut()) {
*dst = src;
}
(*pcache_ptr).hashed_boot_pw_valid = 1;
}
PasswordType::Update => {
for (&src, dst) in digest.iter().zip((*pcache_ptr).hashed_update_pw.iter_mut()) {
*dst = src;
}
(*pcache_ptr).hashed_update_pw_valid = 1;
}
}
}
}
/// Reads a 256-bit key at a given index offset
fn read_key_256(&mut self, index: u8) -> [u8; 32] {
let mut key: [u8; 32] = [0; 32];
for (addr, word) in key.chunks_mut(4).into_iter().enumerate() {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, index as u32 + addr as u32);
let keyword = self.keyrom.rf(utra::keyrom::DATA_DATA);
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
}
/// Reads a 128-bit key at a given index offset
fn read_key_128(&mut self, index: u8) -> [u8; 16] {
let mut key: [u8; 16] = [0; 16];
for (addr, word) in key.chunks_mut(4).into_iter().enumerate() {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, index as u32 + addr as u32);
let keyword = self.keyrom.rf(utra::keyrom::DATA_DATA);
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
}
/// Returns the `salt` needed for the `bcrypt` routine.
/// This routine handles the special-case of being unitialized: in that case, we need to get
/// salt from a staging area, and not our KEYROM. However, `setup_key_init` must be called
/// first to ensure that the staging area has a valid salt.
fn get_salt(&mut self) -> [u8; 16] {
if!self.is_initialized() {
// we're not initialized, use the salt that should already be in the staging area
let sensitive_slice = self.sensitive_data.as_slice::<u32>();
let mut key: [u8; 16] = [0; 16];
for (word, &keyword) in key.chunks_mut(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::PEPPER as usize..KeyRomLocs::PEPPER as usize + 128/(size_of::<u32>()*8)].iter()) {
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
} else {
self.read_key_128(KeyRomLocs::PEPPER)
}
}
/// Called by the UX layer to track which password we're currently requesting
pub fn set_ux_password_type(&mut self, cur_type: Option<PasswordType>) {
self.cur_password_type = cur_type;
}
/// Called by the UX layer to check which password request is in progress
pub fn get_ux_password_type(&self) -> Option<PasswordType> {self.cur_password_type}
/// Returns `true` if the KEYROM's CONFIG word has its INITIALIZED bit set,
/// i.e. a key-init run has completed and been committed to the ROM.
/// Takes `&mut self` because selecting the CONFIG word writes the ROM's
/// address register.
pub fn is_initialized(&mut self) -> bool {
    self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, KeyRomLocs::CONFIG as u32);
    let config = self.keyrom.rf(utra::keyrom::DATA_DATA);
    // idiom fix: return the comparison directly instead of `if c { true } else { false }`
    config & keyrom_config::INITIALIZED.ms(1) != 0
}
/// Called by the UX layer to set up a key init run. It disables suspend/resume for the duration
/// of the run, and also sets up some missing fields of KEYROM necessary to encrypt passwords.
pub fn setup_key_init(&mut self) {
// block suspend/resume ops during security-sensitive operations
self.susres.set_suspendable(false).expect("couldn't block suspend/resume");
// in this block, keyrom data is copied into RAM.
// make a copy of the KEYROM to hold the new mods, in the sensitive data area
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for addr in 0..256 {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, addr);
sensitive_slice[addr as usize] = self.keyrom.rf(utra::keyrom::DATA_DATA);
}
// provision the pepper
for keyword in sensitive_slice[KeyRomLocs::PEPPER as usize..KeyRomLocs::PEPPER as usize + 128/(size_of::<u32>()*8)].iter_mut() {
*keyword = self.trng.get_u32().expect("couldn't get random number");
}
}
/// Core of the key initialization routine. Requires a `progress_modal` dialog box that has been set
/// up with the appropriate notification messages by the UX layer, and a `Slider` type action which
/// is used to report the progress of the initialization routine. We assume the `Slider` box is set
/// up to report progress on a range of 0-100%.
///
/// This routine dispatches the following activities:
/// - generate signing private key (encrypted with update password)
/// - generate rootkey (encrypted with boot password)
/// - generate signing public key
/// - set the init bit
/// - sign the loader
/// - sign the kernel
/// - compute the patch set for the FPGA bitstream
/// - do the patch (whatever that means - gotta deal with the AES key, HMAC etc.)
/// - verify the FPGA image hmac
/// - sign the FPGA image
/// - get ready for a reboot
/// - returns true if we should reboot (everything succeeded)
/// - returns false if we had an error condition (don't reboot)
pub fn do_key_init(&mut self, progress_modal: &mut Modal, progress_action: &mut Slider) -> bool {
// kick the progress bar to indicate we've entered the routine
update_progress(1, progress_modal, progress_action);
let keypair: Keypair = Keypair::generate(&mut self.trng);
// pub key is easy, no need to encrypt
let public_key: [u8; ed25519_dalek::PUBLIC_KEY_LENGTH] = keypair.public.to_bytes();
{ // scope sensitive_slice narrowly, as it borrows *self mutably, and can mess up later calls that borrow an immutable self
// sensitive_slice is our staging area for the new keyrom contents
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in public_key.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::SELFSIGN_PUBKEY as usize..KeyRomLocs::SELFSIGN_PUBKEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
// extract the update password key from the cache, and apply it to the private key
let pcache: &PasswordCache = unsafe{&*(self.pass_cache.as_mut_ptr() as *mut PasswordCache)};
#[cfg(feature = "hazardous-debug")]
{
log::info!("cached boot passwords {:x?}", pcache.hashed_boot_pw);
log::info!("cached update password: {:x?}", pcache.hashed_update_pw);
}
// private key must XOR with password before storing
let mut private_key_enc: [u8; ed25519_dalek::SECRET_KEY_LENGTH] = [0; ed25519_dalek::SECRET_KEY_LENGTH];
// we do this from to try and avoid making as few copies of the hashed password as possible
for (dst, (plain, key)) in
private_key_enc.iter_mut()
.zip(keypair.secret.to_bytes().iter()
.zip(pcache.hashed_update_pw.iter())) {
*dst = plain ^ key;
}
// store the private key to the keyrom staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in private_key_enc.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::SELFSIGN_PRIVKEY as usize..KeyRomLocs::SELFSIGN_PRIVKEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
update_progress(10, progress_modal, progress_action);
// generate and store a root key (aka boot key), this is what is unlocked by the "boot password"
// ironically, it's a "lower security" key because it just acts as a gatekeeper to further
// keys that would have a stronger password applied to them, based upon the importance of the secret
// think of this more as a user PIN login confirmation, than as a significant cryptographic event
let mut boot_key_enc: [u8; 32] = [0; 32];
for (dst, key) in
boot_key_enc.chunks_mut(4).into_iter()
.zip(pcache.hashed_boot_pw.chunks(4).into_iter()) {
let key_word = self.trng.get_u32().unwrap().to_be_bytes();
// just unroll this loop, it's fast and easy enough
(*dst)[0] = key[0] ^ key_word[0];
(*dst)[1] = key[1] ^ key_word[1];
(*dst)[2] = key[2] ^ key_word[2];
(*dst)[3] = key[3] ^ key_word[3];
// also note that interestingly, we don't have to XOR it with the hashed boot password --
// this key isn't used by this routine, just initialized, so really, it only matters to
// XOR it with the password when you use it the first time to encrypt something.
}
// store the boot key to the keyrom staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in private_key_enc.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::USER_KEY as usize..KeyRomLocs::USER_KEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
update_progress(20, progress_modal, progress_action);
// sign the loader
let (loader_sig, loader_len) = self.sign_loader(&keypair);
update_progress(30, progress_modal, progress_action);
// sign the kernel
let (kernel_sig, kernel_len) = self.sign_kernel(&keypair);
update_progress(40, progress_modal, progress_action);
// encrypt the FPGA key using the update password. in an un-init system, it is provided to us in plaintext format
// e.g. in the case that we're doing a BBRAM boot (eFuse flow would give us a 0's key and we'd later on set it)
#[cfg(feature = "hazardous-debug")]
self.debug_print_key(KeyRomLocs::FPGA_KEY as usize, 256, "FPGA key before encryption: ");
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (word, key_word) in sensitive_slice[KeyRomLocs::FPGA_KEY as usize..KeyRomLocs::FPGA_KEY as usize + 256/(size_of::<u32>()*8)].iter_mut()
.zip(pcache.hashed_update_pw.chunks(4).into_iter()) {
*word = *word ^ u32::from_be_bytes(key_word.try_into().unwrap());
}
}
update_progress(50, progress_modal, progress_action);
// set the "init" bit in the staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, KeyRomLocs::CONFIG as u32);
let config = self.keyrom.rf(utra::keyrom::DATA_DATA);
sensitive_slice[KeyRomLocs::CONFIG as usize] |= keyrom_config::INITIALIZED.ms(1);
}
update_progress(60, progress_modal, progress_action);
#[cfg(feature = "hazardous-debug")]
{
log::info!("Self private key: {:x?}", keypair.secret.to_bytes());
log::info!("Self public key: {:x?}", keypair.public.to_bytes());
self.debug_staging();
}
// Because we're initializing keys for the *first* time, make a backup copy of the bitstream to
// the staging area. Note that if we're doing an update, the update target would already be
// in the staging area, so this step should be skipped.
self.make_gateware_backup(60, 70, progress_modal, progress_action);
// compute the keyrom patch set for the bitstream
// at this point the KEYROM as replicated in sensitive_slice should have all its assets in place
patch::should_patch(42);
// finalize the progress bar on exit -- always leave at 100%
update_progress(100, progress_modal, progress_action);
true
}
fn make_gateware_backup(&mut self, prog_start: u32, prog_end: u32, progress_modal: &mut Modal, progress_action: &mut Slider) {
// println!("this is a test of stdlib");
}
#[cfg(feature = "hazardous-debug")]
/// Dumps every staged KEYROM field (keys, pepper, config) to the log.
/// Compiled only under the `hazardous-debug` feature: this prints secret material.
fn debug_staging(&self) {
self.debug_print_key(KeyRomLocs::FPGA_KEY as usize, 256, "FPGA key: ");
self.debug_print_key(KeyRomLocs::SELFSIGN_PRIVKEY as usize, 256, "Self private key: ");
self.debug_print_key(KeyRomLocs::SELFSIGN_PUBKEY as usize, 256, "Self public key: ");
self.debug_print_key(KeyRomLocs::DEVELOPER_PUBKEY as usize, 256, "Dev public key: ");
self.debug_print_key(KeyRomLocs::THIRDPARTY_PUBKEY as usize, 256, "3rd party public key: ");
self.debug_print_key(KeyRomLocs::USER_KEY as usize, 256, "Boot key: ");
self.debug_print_key(KeyRomLocs::PEPPER as usize, 128, "Pepper: ");
self.debug_print_key(KeyRomLocs::CONFIG as usize, 32, "Config (as BE): ");
}
#[cfg(feature = "hazardous-debug")]
/// Hex-dumps `num_bits` worth of staged KEYROM data starting at word index
/// `offset` of the sensitive-data staging area, prefixed with `name`.
/// Compiled only under the `hazardous-debug` feature: this logs secret material.
fn debug_print_key(&self, offset: usize, num_bits: usize, name: &str) {
use core::fmt::Write;
let mut debugstr = xous_ipc::String::<4096>::new();
let sensitive_slice = self.sensitive_data.as_slice::<u32>();
write!(debugstr, "{}", name).unwrap();
// words are emitted big-endian, matching the byte order used when staging keys
for word in sensitive_slice[offset.. offset as usize + num_bits/(size_of::<u32>()*8)].iter() {
for byte in word.to_be_bytes().iter() {
write!(debugstr, "{:02x}", byte).unwrap();
}
}
log::info!("{}", debugstr);
}
pub fn sign_loader(&mut self, signing_key: &Keypair) -> (Signature, u32) {
let loader_len =
xous::LOADER_CODE_LEN
- SIGBLOCK_SIZE
+ graphics_server::fontmap::FONT_TOTAL_LEN as u32
+ 8; // two u32 words are appended to the end, which repeat the "version" and "length" fields encoded in the signature block
// this is a huge hash, so, get a hardware hasher, even if it means waiting for it
let mut hasher = engine_sha512::Sha512::new(Some(engine_sha512::FallbackStrategy::WaitForHardware));
let loader_region = self.loader_code.as_slice::<u8>();
// the loader data starts one page in; the first page is reserved for the signature itself
hasher.update(&loader_region[SIGBLOCK_SIZE as usize..]);
// now get the font plane data
self.gfx.bulk_read_restart(); // reset the bulk read pointers on the gfx side
let bulkread = BulkRead::default();
let mut buf = xous_ipc::Buffer::into_buf(bulkread).expect("couldn't transform bulkread into aligned buffer");
// this form of loop was chosen to avoid the multiple re-initializations and copies that would be entailed
// in our usual idiom for pasing buffers around. instead, we create a single buffer, and re-use it for
// every iteration of the loop.
loop {
buf.lend_mut(self.gfx.conn(), self.gfx.bulk_read_fontmap_op()).expect("couldn't do bulkread from gfx");
let br = buf.as_flat::<BulkRead, _>().unwrap();
hasher.update(&br.buf[..br.len as usize]);
if br.len!= bulkread.buf.len() as u32 {
log::trace!("non-full block len: {}", br.len);
}
if br.len < bulkread.buf.len() as u32
|
{
// read until we get a buffer that's not fully filled
break;
}
|
conditional_block
|
|
implementation.rs
|
or Rust optimizers removing writes
sensitive_data: xous::MemoryRange, // this gets purged at least on every suspend, but ideally purged sooner than that
pass_cache: xous::MemoryRange, // this can be purged based on a policy, as set below
boot_password_policy: PasswordRetentionPolicy,
update_password_policy: PasswordRetentionPolicy,
cur_password_type: Option<PasswordType>, // for tracking which password we're dealing with at the UX layer
susres: susres::Susres, // for disabling suspend/resume
trng: trng::Trng,
gam: gam::Gam, // for raising UX elements directly
gfx: graphics_server::Gfx, // for reading out font planes for signing verification
spinor: spinor::Spinor,
}
impl RootKeys {
pub fn new(xns: &xous_names::XousNames) -> RootKeys {
let keyrom = xous::syscall::map_memory(
xous::MemoryAddress::new(utra::keyrom::HW_KEYROM_BASE),
None,
4096,
xous::MemoryFlags::R | xous::MemoryFlags::W,
)
.expect("couldn't map keyrom CSR range");
// read-only memory maps. even if we don't refer to them, we map them into our process
// so that no other processes can claim them
let gateware = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::SOC_MAIN_GW_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::SOC_MAIN_GW_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the SoC gateware region");
let staging = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::SOC_STAGING_GW_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::SOC_STAGING_GW_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the SoC staging region");
let loader_code = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::LOADER_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::LOADER_CODE_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the loader code region");
let kernel = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::KERNEL_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::KERNEL_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the kernel region");
let sensitive_data = xous::syscall::map_memory(
None,
None,
0x1000,
xous::MemoryFlags::R | xous::MemoryFlags::W,
).expect("couldn't map sensitive data page");
let pass_cache = xous::syscall::map_memory(
None,
None,
0x1000,
xous::MemoryFlags::R | xous::MemoryFlags::W,
).expect("couldn't map sensitive data page");
let spinor = spinor::Spinor::new(&xns).expect("couldn't connect to spinor server");
spinor.register_soc_token().expect("couldn't register rootkeys as the one authorized writer to the gateware update area!");
let keys = RootKeys {
keyrom: CSR::new(keyrom.as_mut_ptr() as *mut u32),
gateware,
staging,
loader_code,
kernel,
sensitive_data,
pass_cache,
update_password_policy: PasswordRetentionPolicy::AlwaysPurge,
boot_password_policy: PasswordRetentionPolicy::AlwaysKeep,
cur_password_type: None,
susres: susres::Susres::new_without_hook(&xns).expect("couldn't connect to susres without hook"),
trng: trng::Trng::new(&xns).expect("couldn't connect to TRNG server"),
gam: gam::Gam::new(&xns).expect("couldn't connect to GAM"),
gfx: graphics_server::Gfx::new(&xns).expect("couldn't connect to gfx"),
spinor
};
keys
}
fn purge_password(&mut self, pw_type: PasswordType) {
unsafe {
let pcache_ptr: *mut PasswordCache = self.pass_cache.as_mut_ptr() as *mut PasswordCache;
match pw_type {
PasswordType::Boot => {
for p in (*pcache_ptr).hashed_boot_pw.iter_mut() {
*p = 0;
}
(*pcache_ptr).hashed_boot_pw_valid = 0;
}
PasswordType::Update => {
for p in (*pcache_ptr).hashed_update_pw.iter_mut() {
*p = 0;
}
(*pcache_ptr).hashed_update_pw_valid = 0;
}
}
}
core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
fn purge_sensitive_data(&mut self) {
let data = self.sensitive_data.as_slice_mut::<u32>();
for d in data.iter_mut() {
*d = 0;
}
core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
pub fn suspend(&mut self) {
match self.boot_password_policy {
PasswordRetentionPolicy::AlwaysKeep => {
()
},
_ => {
self.purge_password(PasswordType::Boot);
}
}
match self.update_password_policy {
PasswordRetentionPolicy::AlwaysKeep => {
()
},
_ => {
self.purge_password(PasswordType::Update);
}
}
self.purge_sensitive_data();
}
pub fn resume(&mut self) {
}
pub fn update_policy(&mut self, policy: Option<PasswordRetentionPolicy>) {
let pw_type = if let Some(cur_type) = self.cur_password_type {
cur_type
} else {
log::error!("got an unexpected policy update from the UX");
return;
};
if let Some(p) = policy {
match pw_type {
PasswordType::Boot => self.boot_password_policy = p,
PasswordType::Update => self.update_password_policy = p,
};
} else {
match pw_type {
PasswordType::Boot => PasswordRetentionPolicy::AlwaysPurge,
PasswordType::Update => PasswordRetentionPolicy::AlwaysPurge,
};
}
// once the policy has been set, revert the current type to None
self.cur_password_type = None;
}
/// Plaintext password is passed as a &str. Any copies internally are destroyed. Caller is responsible for destroying the &str original.
/// Performs a bcrypt hash of the password, with the currently set salt; does not store the plaintext after exit.
pub fn hash_and_save_password(&mut self, pw: &str) {
let pw_type = if let Some(cur_type) = self.cur_password_type {
cur_type
} else {
log::error!("got an unexpected password from the UX");
return;
};
let mut hashed_password: [u8; 24] = [0; 24];
let mut salt = self.get_salt();
// we change the salt ever-so-slightly for every password. This doesn't make any one password more secure;
// but it disallows guessing all the passwords with a single off-the-shelf hashcat run.
salt[0] ^= pw_type as u8;
let timer = ticktimer_server::Ticktimer::new().expect("couldn't connect to ticktimer");
// the bcrypt function takes the plaintext password and makes one copy to prime the blowfish bcrypt
// cipher. It is responsible for erasing this state.
let start_time = timer.elapsed_ms();
bcrypt(BCRYPT_COST, &salt, pw, &mut hashed_password); // note: this internally makes a copy of the password, and destroys it
let elapsed = timer.elapsed_ms() - start_time;
log::info!("bcrypt cost: {} time: {}ms", BCRYPT_COST, elapsed); // benchmark to figure out how to set cost parameter
// expand the 24-byte (192-bit) bcrypt result into 256 bits, so we can use it directly as XOR key material
// against 256-bit AES and curve25519 keys
// for such a small hash, software is the most performant choice
let mut hasher = engine_sha512::Sha512Trunc256::new(Some(engine_sha512::FallbackStrategy::SoftwareOnly));
hasher.update(hashed_password);
let digest = hasher.finalize();
let pcache_ptr: *mut PasswordCache = self.pass_cache.as_mut_ptr() as *mut PasswordCache;
unsafe {
match pw_type {
PasswordType::Boot => {
for (&src, dst) in digest.iter().zip((*pcache_ptr).hashed_boot_pw.iter_mut()) {
*dst = src;
}
(*pcache_ptr).hashed_boot_pw_valid = 1;
}
PasswordType::Update => {
for (&src, dst) in digest.iter().zip((*pcache_ptr).hashed_update_pw.iter_mut()) {
*dst = src;
}
(*pcache_ptr).hashed_update_pw_valid = 1;
}
}
}
}
/// Reads a 256-bit key at a given index offset
fn read_key_256(&mut self, index: u8) -> [u8; 32] {
let mut key: [u8; 32] = [0; 32];
for (addr, word) in key.chunks_mut(4).into_iter().enumerate() {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, index as u32 + addr as u32);
let keyword = self.keyrom.rf(utra::keyrom::DATA_DATA);
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
}
/// Reads a 128-bit key at a given index offset
fn read_key_128(&mut self, index: u8) -> [u8; 16] {
let mut key: [u8; 16] = [0; 16];
for (addr, word) in key.chunks_mut(4).into_iter().enumerate() {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, index as u32 + addr as u32);
let keyword = self.keyrom.rf(utra::keyrom::DATA_DATA);
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
}
/// Returns the `salt` needed for the `bcrypt` routine.
/// This routine handles the special-case of being unitialized: in that case, we need to get
/// salt from a staging area, and not our KEYROM. However, `setup_key_init` must be called
/// first to ensure that the staging area has a valid salt.
fn get_salt(&mut self) -> [u8; 16] {
if!self.is_initialized() {
// we're not initialized, use the salt that should already be in the staging area
let sensitive_slice = self.sensitive_data.as_slice::<u32>();
let mut key: [u8; 16] = [0; 16];
for (word, &keyword) in key.chunks_mut(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::PEPPER as usize..KeyRomLocs::PEPPER as usize + 128/(size_of::<u32>()*8)].iter()) {
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
} else {
self.read_key_128(KeyRomLocs::PEPPER)
}
}
/// Called by the UX layer to track which password we're currently requesting
pub fn set_ux_password_type(&mut self, cur_type: Option<PasswordType>) {
self.cur_password_type = cur_type;
}
/// Called by the UX layer to check which password request is in progress
pub fn get_ux_password_type(&self) -> Option<PasswordType> {self.cur_password_type}
pub fn is_initialized(&mut self) -> bool {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, KeyRomLocs::CONFIG as u32);
let config = self.keyrom.rf(utra::keyrom::DATA_DATA);
if config & keyrom_config::INITIALIZED.ms(1)!= 0 {
true
} else {
false
}
}
/// Called by the UX layer to set up a key init run. It disables suspend/resume for the duration
/// of the run, and also sets up some missing fields of KEYROM necessary to encrypt passwords.
pub fn setup_key_init(&mut self) {
// block suspend/resume ops during security-sensitive operations
self.susres.set_suspendable(false).expect("couldn't block suspend/resume");
// in this block, keyrom data is copied into RAM.
// make a copy of the KEYROM to hold the new mods, in the sensitive data area
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for addr in 0..256 {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, addr);
sensitive_slice[addr as usize] = self.keyrom.rf(utra::keyrom::DATA_DATA);
}
// provision the pepper
for keyword in sensitive_slice[KeyRomLocs::PEPPER as usize..KeyRomLocs::PEPPER as usize + 128/(size_of::<u32>()*8)].iter_mut() {
*keyword = self.trng.get_u32().expect("couldn't get random number");
}
}
/// Core of the key initialization routine. Requires a `progress_modal` dialog box that has been set
/// up with the appropriate notification messages by the UX layer, and a `Slider` type action which
/// is used to report the progress of the initialization routine. We assume the `Slider` box is set
/// up to report progress on a range of 0-100%.
///
/// This routine dispatches the following activities:
/// - generate signing private key (encrypted with update password)
/// - generate rootkey (encrypted with boot password)
/// - generate signing public key
/// - set the init bit
/// - sign the loader
/// - sign the kernel
/// - compute the patch set for the FPGA bitstream
/// - do the patch (whatever that means - gotta deal with the AES key, HMAC etc.)
/// - verify the FPGA image hmac
/// - sign the FPGA image
/// - get ready for a reboot
/// - returns true if we should reboot (everything succeeded)
/// - returns false if we had an error condition (don't reboot)
pub fn do_key_init(&mut self, progress_modal: &mut Modal, progress_action: &mut Slider) -> bool {
// kick the progress bar to indicate we've entered the routine
update_progress(1, progress_modal, progress_action);
let keypair: Keypair = Keypair::generate(&mut self.trng);
// pub key is easy, no need to encrypt
let public_key: [u8; ed25519_dalek::PUBLIC_KEY_LENGTH] = keypair.public.to_bytes();
{ // scope sensitive_slice narrowly, as it borrows *self mutably, and can mess up later calls that borrow an immutable self
// sensitive_slice is our staging area for the new keyrom contents
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in public_key.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::SELFSIGN_PUBKEY as usize..KeyRomLocs::SELFSIGN_PUBKEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
// extract the update password key from the cache, and apply it to the private key
let pcache: &PasswordCache = unsafe{&*(self.pass_cache.as_mut_ptr() as *mut PasswordCache)};
#[cfg(feature = "hazardous-debug")]
{
log::info!("cached boot passwords {:x?}", pcache.hashed_boot_pw);
log::info!("cached update password: {:x?}", pcache.hashed_update_pw);
}
// private key must XOR with password before storing
let mut private_key_enc: [u8; ed25519_dalek::SECRET_KEY_LENGTH] = [0; ed25519_dalek::SECRET_KEY_LENGTH];
// we do this from to try and avoid making as few copies of the hashed password as possible
for (dst, (plain, key)) in
private_key_enc.iter_mut()
.zip(keypair.secret.to_bytes().iter()
.zip(pcache.hashed_update_pw.iter())) {
*dst = plain ^ key;
}
// store the private key to the keyrom staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in private_key_enc.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::SELFSIGN_PRIVKEY as usize..KeyRomLocs::SELFSIGN_PRIVKEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
|
// generate and store a root key (aka boot key), this is what is unlocked by the "boot password"
// ironically, it's a "lower security" key because it just acts as a gatekeeper to further
// keys that would have a stronger password applied to them, based upon the importance of the secret
// think of this more as a user PIN login confirmation, than as a significant cryptographic event
let mut boot_key_enc: [u8; 32] = [0; 32];
for (dst, key) in
boot_key_enc.chunks_mut(4).into_iter()
.zip(pcache.hashed_boot_pw.chunks(4).into_iter()) {
let key_word = self.trng.get_u32().unwrap().to_be_bytes();
// just unroll this loop, it's fast and easy enough
(*dst)[0] = key[0] ^ key_word[0];
(*dst)[1] = key[1] ^ key_word[1];
(*dst)[2] = key[2] ^ key_word[2];
(*dst)[3] = key[3] ^ key_word[3];
// also note that interestingly, we don't have to XOR it with the hashed boot password --
// this key isn't used by this routine, just initialized, so really, it only matters to
// XOR it with the password when you use it the first time to encrypt something.
}
// store the boot key to the keyrom staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in private_key_enc.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::USER_KEY as usize..KeyRomLocs::USER_KEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
update_progress(20, progress_modal, progress_action);
// sign the loader
let (loader_sig, loader_len) = self.sign_loader(&keypair);
update_progress(30, progress_modal, progress_action);
// sign the kernel
let (kernel_sig, kernel_len) = self.sign_kernel(&keypair);
update_progress(40, progress_modal, progress_action);
// encrypt the FPGA key using the update password. in an un-init system, it is provided to us in plaintext format
// e.g. in the case that we're doing a BBRAM boot (eFuse flow would give us a 0's key and we'd later on set it)
#[cfg(feature = "hazardous-debug")]
self.debug_print_key(KeyRomLocs::FPGA_KEY as usize, 256, "FPGA key before encryption: ");
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (word, key_word)
|
update_progress(10, progress_modal, progress_action);
|
random_line_split
|
implementation.rs
|
r Rust optimizers removing writes
sensitive_data: xous::MemoryRange, // this gets purged at least on every suspend, but ideally purged sooner than that
pass_cache: xous::MemoryRange, // this can be purged based on a policy, as set below
boot_password_policy: PasswordRetentionPolicy,
update_password_policy: PasswordRetentionPolicy,
cur_password_type: Option<PasswordType>, // for tracking which password we're dealing with at the UX layer
susres: susres::Susres, // for disabling suspend/resume
trng: trng::Trng,
gam: gam::Gam, // for raising UX elements directly
gfx: graphics_server::Gfx, // for reading out font planes for signing verification
spinor: spinor::Spinor,
}
impl RootKeys {
pub fn new(xns: &xous_names::XousNames) -> RootKeys {
let keyrom = xous::syscall::map_memory(
xous::MemoryAddress::new(utra::keyrom::HW_KEYROM_BASE),
None,
4096,
xous::MemoryFlags::R | xous::MemoryFlags::W,
)
.expect("couldn't map keyrom CSR range");
// read-only memory maps. even if we don't refer to them, we map them into our process
// so that no other processes can claim them
let gateware = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::SOC_MAIN_GW_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::SOC_MAIN_GW_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the SoC gateware region");
let staging = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::SOC_STAGING_GW_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::SOC_STAGING_GW_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the SoC staging region");
let loader_code = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::LOADER_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::LOADER_CODE_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the loader code region");
let kernel = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::KERNEL_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::KERNEL_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the kernel region");
let sensitive_data = xous::syscall::map_memory(
None,
None,
0x1000,
xous::MemoryFlags::R | xous::MemoryFlags::W,
).expect("couldn't map sensitive data page");
let pass_cache = xous::syscall::map_memory(
None,
None,
0x1000,
xous::MemoryFlags::R | xous::MemoryFlags::W,
).expect("couldn't map sensitive data page");
let spinor = spinor::Spinor::new(&xns).expect("couldn't connect to spinor server");
spinor.register_soc_token().expect("couldn't register rootkeys as the one authorized writer to the gateware update area!");
let keys = RootKeys {
keyrom: CSR::new(keyrom.as_mut_ptr() as *mut u32),
gateware,
staging,
loader_code,
kernel,
sensitive_data,
pass_cache,
update_password_policy: PasswordRetentionPolicy::AlwaysPurge,
boot_password_policy: PasswordRetentionPolicy::AlwaysKeep,
cur_password_type: None,
susres: susres::Susres::new_without_hook(&xns).expect("couldn't connect to susres without hook"),
trng: trng::Trng::new(&xns).expect("couldn't connect to TRNG server"),
gam: gam::Gam::new(&xns).expect("couldn't connect to GAM"),
gfx: graphics_server::Gfx::new(&xns).expect("couldn't connect to gfx"),
spinor
};
keys
}
fn purge_password(&mut self, pw_type: PasswordType) {
unsafe {
let pcache_ptr: *mut PasswordCache = self.pass_cache.as_mut_ptr() as *mut PasswordCache;
match pw_type {
PasswordType::Boot => {
for p in (*pcache_ptr).hashed_boot_pw.iter_mut() {
*p = 0;
}
(*pcache_ptr).hashed_boot_pw_valid = 0;
}
PasswordType::Update => {
for p in (*pcache_ptr).hashed_update_pw.iter_mut() {
*p = 0;
}
(*pcache_ptr).hashed_update_pw_valid = 0;
}
}
}
core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
fn purge_sensitive_data(&mut self) {
let data = self.sensitive_data.as_slice_mut::<u32>();
for d in data.iter_mut() {
*d = 0;
}
core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
pub fn suspend(&mut self) {
match self.boot_password_policy {
PasswordRetentionPolicy::AlwaysKeep => {
()
},
_ => {
self.purge_password(PasswordType::Boot);
}
}
match self.update_password_policy {
PasswordRetentionPolicy::AlwaysKeep => {
()
},
_ => {
self.purge_password(PasswordType::Update);
}
}
self.purge_sensitive_data();
}
pub fn resume(&mut self) {
}
pub fn update_policy(&mut self, policy: Option<PasswordRetentionPolicy>) {
let pw_type = if let Some(cur_type) = self.cur_password_type {
cur_type
} else {
log::error!("got an unexpected policy update from the UX");
return;
};
if let Some(p) = policy {
match pw_type {
PasswordType::Boot => self.boot_password_policy = p,
PasswordType::Update => self.update_password_policy = p,
};
} else {
match pw_type {
PasswordType::Boot => PasswordRetentionPolicy::AlwaysPurge,
PasswordType::Update => PasswordRetentionPolicy::AlwaysPurge,
};
}
// once the policy has been set, revert the current type to None
self.cur_password_type = None;
}
/// Plaintext password is passed as a &str. Any copies internally are destroyed. Caller is responsible for destroying the &str original.
/// Performs a bcrypt hash of the password, with the currently set salt; does not store the plaintext after exit.
pub fn hash_and_save_password(&mut self, pw: &str) {
let pw_type = if let Some(cur_type) = self.cur_password_type {
cur_type
} else {
log::error!("got an unexpected password from the UX");
return;
};
let mut hashed_password: [u8; 24] = [0; 24];
let mut salt = self.get_salt();
// we change the salt ever-so-slightly for every password. This doesn't make any one password more secure;
// but it disallows guessing all the passwords with a single off-the-shelf hashcat run.
salt[0] ^= pw_type as u8;
let timer = ticktimer_server::Ticktimer::new().expect("couldn't connect to ticktimer");
// the bcrypt function takes the plaintext password and makes one copy to prime the blowfish bcrypt
// cipher. It is responsible for erasing this state.
let start_time = timer.elapsed_ms();
bcrypt(BCRYPT_COST, &salt, pw, &mut hashed_password); // note: this internally makes a copy of the password, and destroys it
let elapsed = timer.elapsed_ms() - start_time;
log::info!("bcrypt cost: {} time: {}ms", BCRYPT_COST, elapsed); // benchmark to figure out how to set cost parameter
// expand the 24-byte (192-bit) bcrypt result into 256 bits, so we can use it directly as XOR key material
// against 256-bit AES and curve25519 keys
// for such a small hash, software is the most performant choice
let mut hasher = engine_sha512::Sha512Trunc256::new(Some(engine_sha512::FallbackStrategy::SoftwareOnly));
hasher.update(hashed_password);
let digest = hasher.finalize();
let pcache_ptr: *mut PasswordCache = self.pass_cache.as_mut_ptr() as *mut PasswordCache;
unsafe {
match pw_type {
PasswordType::Boot => {
for (&src, dst) in digest.iter().zip((*pcache_ptr).hashed_boot_pw.iter_mut()) {
*dst = src;
}
(*pcache_ptr).hashed_boot_pw_valid = 1;
}
PasswordType::Update => {
for (&src, dst) in digest.iter().zip((*pcache_ptr).hashed_update_pw.iter_mut()) {
*dst = src;
}
(*pcache_ptr).hashed_update_pw_valid = 1;
}
}
}
}
/// Reads a 256-bit key at a given index offset
fn read_key_256(&mut self, index: u8) -> [u8; 32]
|
/// Reads a 128-bit key at a given index offset
fn read_key_128(&mut self, index: u8) -> [u8; 16] {
let mut key: [u8; 16] = [0; 16];
for (addr, word) in key.chunks_mut(4).into_iter().enumerate() {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, index as u32 + addr as u32);
let keyword = self.keyrom.rf(utra::keyrom::DATA_DATA);
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
}
/// Returns the `salt` needed for the `bcrypt` routine.
/// This routine handles the special-case of being unitialized: in that case, we need to get
/// salt from a staging area, and not our KEYROM. However, `setup_key_init` must be called
/// first to ensure that the staging area has a valid salt.
fn get_salt(&mut self) -> [u8; 16] {
if!self.is_initialized() {
// we're not initialized, use the salt that should already be in the staging area
let sensitive_slice = self.sensitive_data.as_slice::<u32>();
let mut key: [u8; 16] = [0; 16];
for (word, &keyword) in key.chunks_mut(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::PEPPER as usize..KeyRomLocs::PEPPER as usize + 128/(size_of::<u32>()*8)].iter()) {
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
} else {
self.read_key_128(KeyRomLocs::PEPPER)
}
}
/// Called by the UX layer to track which password we're currently requesting
pub fn set_ux_password_type(&mut self, cur_type: Option<PasswordType>) {
self.cur_password_type = cur_type;
}
/// Called by the UX layer to check which password request is in progress
pub fn get_ux_password_type(&self) -> Option<PasswordType> {self.cur_password_type}
pub fn is_initialized(&mut self) -> bool {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, KeyRomLocs::CONFIG as u32);
let config = self.keyrom.rf(utra::keyrom::DATA_DATA);
if config & keyrom_config::INITIALIZED.ms(1)!= 0 {
true
} else {
false
}
}
/// Called by the UX layer to set up a key init run. It disables suspend/resume for the duration
/// of the run, and also sets up some missing fields of KEYROM necessary to encrypt passwords.
pub fn setup_key_init(&mut self) {
// block suspend/resume ops during security-sensitive operations
self.susres.set_suspendable(false).expect("couldn't block suspend/resume");
// in this block, keyrom data is copied into RAM.
// make a copy of the KEYROM to hold the new mods, in the sensitive data area
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for addr in 0..256 {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, addr);
sensitive_slice[addr as usize] = self.keyrom.rf(utra::keyrom::DATA_DATA);
}
// provision the pepper
for keyword in sensitive_slice[KeyRomLocs::PEPPER as usize..KeyRomLocs::PEPPER as usize + 128/(size_of::<u32>()*8)].iter_mut() {
*keyword = self.trng.get_u32().expect("couldn't get random number");
}
}
/// Core of the key initialization routine. Requires a `progress_modal` dialog box that has been set
/// up with the appropriate notification messages by the UX layer, and a `Slider` type action which
/// is used to report the progress of the initialization routine. We assume the `Slider` box is set
/// up to report progress on a range of 0-100%.
///
/// This routine dispatches the following activities:
/// - generate signing private key (encrypted with update password)
/// - generate rootkey (encrypted with boot password)
/// - generate signing public key
/// - set the init bit
/// - sign the loader
/// - sign the kernel
/// - compute the patch set for the FPGA bitstream
/// - do the patch (whatever that means - gotta deal with the AES key, HMAC etc.)
/// - verify the FPGA image hmac
/// - sign the FPGA image
/// - get ready for a reboot
/// - returns true if we should reboot (everything succeeded)
/// - returns false if we had an error condition (don't reboot)
pub fn do_key_init(&mut self, progress_modal: &mut Modal, progress_action: &mut Slider) -> bool {
// kick the progress bar to indicate we've entered the routine
update_progress(1, progress_modal, progress_action);
let keypair: Keypair = Keypair::generate(&mut self.trng);
// pub key is easy, no need to encrypt
let public_key: [u8; ed25519_dalek::PUBLIC_KEY_LENGTH] = keypair.public.to_bytes();
{ // scope sensitive_slice narrowly, as it borrows *self mutably, and can mess up later calls that borrow an immutable self
// sensitive_slice is our staging area for the new keyrom contents
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in public_key.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::SELFSIGN_PUBKEY as usize..KeyRomLocs::SELFSIGN_PUBKEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
// extract the update password key from the cache, and apply it to the private key
let pcache: &PasswordCache = unsafe{&*(self.pass_cache.as_mut_ptr() as *mut PasswordCache)};
#[cfg(feature = "hazardous-debug")]
{
log::info!("cached boot passwords {:x?}", pcache.hashed_boot_pw);
log::info!("cached update password: {:x?}", pcache.hashed_update_pw);
}
// private key must XOR with password before storing
let mut private_key_enc: [u8; ed25519_dalek::SECRET_KEY_LENGTH] = [0; ed25519_dalek::SECRET_KEY_LENGTH];
// we do this from to try and avoid making as few copies of the hashed password as possible
for (dst, (plain, key)) in
private_key_enc.iter_mut()
.zip(keypair.secret.to_bytes().iter()
.zip(pcache.hashed_update_pw.iter())) {
*dst = plain ^ key;
}
// store the private key to the keyrom staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in private_key_enc.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::SELFSIGN_PRIVKEY as usize..KeyRomLocs::SELFSIGN_PRIVKEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
update_progress(10, progress_modal, progress_action);
// generate and store a root key (aka boot key), this is what is unlocked by the "boot password"
// ironically, it's a "lower security" key because it just acts as a gatekeeper to further
// keys that would have a stronger password applied to them, based upon the importance of the secret
// think of this more as a user PIN login confirmation, than as a significant cryptographic event
let mut boot_key_enc: [u8; 32] = [0; 32];
for (dst, key) in
boot_key_enc.chunks_mut(4).into_iter()
.zip(pcache.hashed_boot_pw.chunks(4).into_iter()) {
let key_word = self.trng.get_u32().unwrap().to_be_bytes();
// just unroll this loop, it's fast and easy enough
(*dst)[0] = key[0] ^ key_word[0];
(*dst)[1] = key[1] ^ key_word[1];
(*dst)[2] = key[2] ^ key_word[2];
(*dst)[3] = key[3] ^ key_word[3];
// also note that interestingly, we don't have to XOR it with the hashed boot password --
// this key isn't used by this routine, just initialized, so really, it only matters to
// XOR it with the password when you use it the first time to encrypt something.
}
// store the boot key to the keyrom staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in private_key_enc.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::USER_KEY as usize..KeyRomLocs::USER_KEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
update_progress(20, progress_modal, progress_action);
// sign the loader
let (loader_sig, loader_len) = self.sign_loader(&keypair);
update_progress(30, progress_modal, progress_action);
// sign the kernel
let (kernel_sig, kernel_len) = self.sign_kernel(&keypair);
update_progress(40, progress_modal, progress_action);
// encrypt the FPGA key using the update password. in an un-init system, it is provided to us in plaintext format
// e.g. in the case that we're doing a BBRAM boot (eFuse flow would give us a 0's key and we'd later on set it)
#[cfg(feature = "hazardous-debug")]
self.debug_print_key(KeyRomLocs::FPGA_KEY as usize, 256, "FPGA key before encryption: ");
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (word, key_
|
{
let mut key: [u8; 32] = [0; 32];
for (addr, word) in key.chunks_mut(4).into_iter().enumerate() {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, index as u32 + addr as u32);
let keyword = self.keyrom.rf(utra::keyrom::DATA_DATA);
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
}
|
identifier_body
|
utils.rs
|
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/// Logs an error message if the passed in `result` is an error.
#[macro_export]
macro_rules! log_if_err {
($result:expr, $log_prefix:expr) => {
if let Err(e) = $result.as_ref() {
log::error!("{}: {}", $log_prefix, e);
}
};
}
/// Logs an error message if the provided `cond` evaluates to false. Also passes the same expression
/// and message into `debug_assert!`, which will panic if debug assertions are enabled.
#[macro_export]
macro_rules! log_if_false_and_debug_assert {
($cond:expr, $msg:expr) => {
if!($cond) {
log::error!($msg);
debug_assert!($cond, $msg);
}
};
($cond:expr, $fmt:expr, $($arg:tt)+) => {
if!($cond) {
log::error!($fmt, $($arg)+);
debug_assert!($cond, $fmt, $($arg)+);
}
};
}
#[cfg(test)]
mod log_err_with_debug_assert_tests {
    use crate::log_if_false_and_debug_assert;
    // Exactly one of the two tests below is compiled, selected by `debug_assertions`,
    // so the macro's panic behavior is exercised in both build profiles.
    /// Tests that `log_if_false_and_debug_assert` panics for a false expression when debug
    /// assertions are enabled.
    #[test]
    #[should_panic(expected = "this will panic")]
    #[cfg(debug_assertions)]
    fn test_debug_assert() {
        log_if_false_and_debug_assert!(true, "this will not panic");
        log_if_false_and_debug_assert!(false, "this will panic");
    }
    /// Tests that `log_if_false_and_debug_assert` does not panic for a false expression when debug
    /// assertions are not enabled.
    #[test]
    #[cfg(not(debug_assertions))]
    fn test_non_debug_assert() {
        log_if_false_and_debug_assert!(true, "this will not panic");
        log_if_false_and_debug_assert!(false, "this will not panic either");
    }
}
/// Export the `connect_to_driver` function to be used throughout the crate.
pub use connect_to_driver::connect_to_driver;
mod connect_to_driver {
use anyhow::{format_err, Error};
use fidl::endpoints::Proxy as _;
use fidl_fuchsia_io::{NodeProxy, OPEN_RIGHT_READABLE, OPEN_RIGHT_WRITABLE};
/// Returns a NodeProxy opened at `path`. The path is guaranteed to exist before the connection
/// is opened.
async fn connect_channel(path: &str) -> Result<NodeProxy, Error>
|
    /// Connects to the driver at `path`, returning a proxy of the specified type. The path is
    /// guaranteed to exist before the connection is opened.
    ///
    /// Returns an error if `path` does not begin with "/dev/", or if the opened node's
    /// underlying channel cannot be extracted from the NodeProxy.
    ///
    /// TODO(fxbug.dev/81378): factor this function out to a common library
    pub async fn connect_to_driver<T: fidl::endpoints::ProtocolMarker>(
        path: &str,
    ) -> Result<T::Proxy, Error> {
        match path.strip_prefix("/dev/") {
            // `connect_channel` takes a path relative to the "/dev" directory.
            Some(path) => fidl::endpoints::ClientEnd::<T>::new(
                connect_channel(path)
                    .await?
                    .into_channel()
                    .map_err(|_| format_err!("into_channel failed on NodeProxy"))?
                    .into_zx_channel(),
            )
            .into_proxy()
            .map_err(Into::into),
            None => Err(format_err!("Path must start with /dev/")),
        }
    }
#[cfg(test)]
mod tests {
use super::*;
use async_utils::PollExt as _;
use fidl::endpoints::{create_proxy, Proxy, ServerEnd};
use fidl_fuchsia_io::{
DirectoryMarker, NodeMarker, MODE_TYPE_DIRECTORY, OPEN_RIGHT_READABLE,
OPEN_RIGHT_WRITABLE,
};
use fuchsia_async as fasync;
use futures::TryStreamExt as _;
use std::sync::Arc;
use vfs::{
directory::entry::DirectoryEntry, directory::helper::DirectlyMutable,
execution_scope::ExecutionScope, file::vmo::read_only_static, pseudo_directory,
};
fn bind_to_dev(dir: Arc<dyn DirectoryEntry>) {
let (dir_proxy, dir_server) = create_proxy::<DirectoryMarker>().unwrap();
let scope = ExecutionScope::new();
dir.open(
scope,
OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
MODE_TYPE_DIRECTORY,
vfs::path::Path::dot(),
ServerEnd::new(dir_server.into_channel()),
);
let ns = fdio::Namespace::installed().unwrap();
ns.bind("/dev", dir_proxy.into_channel().unwrap().into_zx_channel()).unwrap();
}
/// Tests that `connect_to_driver` returns success for the valid existing path.
#[fasync::run_singlethreaded(test)]
async fn test_connect_to_driver_success() {
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"thermal" => pseudo_directory! {
"000" => read_only_static("string beans")
}
}
});
connect_to_driver::<NodeMarker>("/dev/class/thermal/000").await.unwrap();
}
/// Tests that `connect_to_driver` doesn't return until the required path is added.
#[test]
fn test_connect_to_driver_late_add() {
let mut executor = fasync::TestExecutor::new().unwrap();
let thermal_dir = pseudo_directory! {
"000" => read_only_static("string cheese (cheddar)")
};
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"thermal" => thermal_dir.clone()
}
});
let connect_future =
&mut Box::pin(connect_to_driver::<NodeMarker>("/dev/class/thermal/001"));
// The required path is initially not present
assert!(executor.run_until_stalled(connect_future).is_pending());
// Add the required path
thermal_dir.add_entry("001", read_only_static("string cheese (mozzarella)")).unwrap();
// Verify the wait future now returns successfully
assert!(executor.run_until_stalled(connect_future).unwrap().is_ok());
}
/// Tests that `connect_to_driver` correctly waits even if the required parent directory
/// does not yet exist.
#[test]
fn test_connect_to_driver_nonexistent_parent_dir() {
let mut executor = fasync::TestExecutor::new().unwrap();
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"cpu" => pseudo_directory! {
"000" => read_only_static("shoestring fries")
}
}
});
assert!(executor
.run_until_stalled(&mut Box::pin(connect_to_driver::<NodeMarker>(
"/dev/class/thermal/000"
)))
.is_pending());
}
/// Tests that the proxy returned by `connect_to_driver` is usable for sending a FIDL
/// request.
#[fasync::run_singlethreaded(test)]
async fn test_connect_to_driver_gives_usable_proxy() {
use fidl_fuchsia_device as fdev;
// Create a pseudo directory with a fuchsia.device.Controller FIDL server hosted at
// class/fake_dev_controller and bind it to our /dev
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory! {
"fake_dev_controller" => vfs::service::host(
move |mut stream: fdev::ControllerRequestStream| {
async move {
match stream.try_next().await.unwrap() {
Some(fdev::ControllerRequest::GetCurrentPerformanceState {
responder
}) => {
let _ = responder.send(8);
}
e => panic!("Unexpected request: {:?}", e),
}
}
}
)
}
});
// Connect to the driver and call GetCurrentPerformanceState on it
let result = crate::utils::connect_to_driver::<fdev::ControllerMarker>(
"/dev/class/fake_dev_controller",
)
.await
.expect("Failed to connect to driver")
.get_current_performance_state()
.await
.expect("get_current_performance_state FIDL failed");
// Verify we receive the expected result
assert_eq!(result, 8);
}
/// Verifies that invalid arguments are rejected while valid ones are accepted.
#[fasync::run_singlethreaded(test)]
async fn test_connect_to_driver_valid_path() {
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"cpu" => pseudo_directory! {
"000" => read_only_static("stringtown population 1")
}
}
});
connect_to_driver::<NodeMarker>("/svc/fake_service").await.unwrap_err();
connect_to_driver::<NodeMarker>("/dev/class/cpu/000").await.unwrap();
}
}
}
/// The number of nanoseconds since the system was powered on.
pub fn get_current_timestamp() -> crate::types::Nanoseconds {
    // Read the executor clock, then wrap the raw nanosecond count in the newtype.
    let now = fuchsia_async::Time::now();
    crate::types::Nanoseconds(now.into_nanos())
}
use fidl_fuchsia_cobalt::HistogramBucket;
/// Convenient wrapper for creating and storing an integer histogram to use with Cobalt.
pub struct CobaltIntHistogram {
    /// Underlying histogram data storage.
    /// Holds `config.num_buckets + 2` buckets (see `new_vec`): index 0 is the underflow
    /// bucket and the last index is the overflow bucket.
    data: Vec<HistogramBucket>,
    /// Number of data values that have been added to the histogram.
    data_count: u32,
    /// Histogram configuration parameters.
    config: CobaltIntHistogramConfig,
}
/// Histogram configuration parameters used by CobaltIntHistogram.
pub struct CobaltIntHistogramConfig {
    /// Lower bound of the first real bucket; values below `floor` are counted in the
    /// underflow bucket.
    pub floor: i64,
    /// Number of real buckets; two extra buckets are allocated for underflow and overflow.
    pub num_buckets: u32,
    /// Width of each bucket. NOTE(review): `add_data` divides by this value, so 0 would
    /// panic with a divide-by-zero — presumably callers always pass a positive step; verify.
    pub step_size: u32,
}
impl CobaltIntHistogram {
    /// Create a new CobaltIntHistogram.
    pub fn new(config: CobaltIntHistogramConfig) -> Self {
        Self { data: Self::new_vec(config.num_buckets), data_count: 0, config }
    }
    /// Create a new Vec<HistogramBucket> that represents the underlying histogram storage. Two
    /// extra buckets are added for underflow and overflow.
    fn new_vec(num_buckets: u32) -> Vec<HistogramBucket> {
        (0..num_buckets + 2).map(|i| HistogramBucket { index: i, count: 0 }).collect()
    }
    /// Add a data value to the histogram.
    ///
    /// Values below `config.floor` are counted in the underflow bucket (index 0); values at or
    /// above `floor + num_buckets * step_size` are counted in the overflow bucket (last index).
    pub fn add_data(&mut self, n: i64) {
        // Add one to index to account for underflow bucket at index 0.
        // Use euclidean (floor) division: plain `/` truncates toward zero, which would
        // place values in (floor - step_size, floor) into the first real bucket instead
        // of the underflow bucket whenever step_size > 1.
        // NOTE(review): `n - floor` can overflow i64 for extreme inputs; assumed out of
        // range for real metric values.
        let mut index = 1 + (n - self.config.floor).div_euclid(self.config.step_size as i64);
        // Clamp index to 0 and self.data.len() - 1, which Cobalt uses for underflow and overflow,
        // respectively
        index = num_traits::clamp(index, 0, self.data.len() as i64 - 1);
        self.data[index as usize].count += 1;
        self.data_count += 1;
    }
    /// Get the number of data elements that have been added to the histogram.
    pub fn count(&self) -> u32 {
        self.data_count
    }
    /// Clear the histogram.
    pub fn clear(&mut self) {
        self.data = Self::new_vec(self.config.num_buckets);
        self.data_count = 0;
    }
    /// Get the underlying Vec<HistogramBucket> of the histogram.
    pub fn get_data(&self) -> Vec<HistogramBucket> {
        self.data.clone()
    }
}
// Finds all of the node config files under the test package's "/config/data" directory. The node
// config files are identified by a suffix of "node_config.json". The function then calls the
// provided `test_config_file` function for each found config file, passing the JSON structure in as
// an argument. The function returns success if each call to `test_config_file` succeeds. Otherwise,
// the first error encountered is returned.
#[cfg(test)]
pub fn test_each_node_config_file(
    test_config_file: impl Fn(&Vec<serde_json::Value>) -> Result<(), anyhow::Error>,
) -> Result<(), anyhow::Error> {
    use anyhow::Context as _;
    use serde_json as json;
    use std::fs;
    use std::fs::File;
    use std::io::BufReader;
    // Gather (path, parsed JSON) pairs for every matching file under /config/data.
    let mut config_files = Vec::new();
    for entry in fs::read_dir("/config/data").unwrap() {
        let path = entry.unwrap().path();
        let file_path = path.to_str().unwrap().to_string();
        if !file_path.ends_with("node_config.json") {
            continue;
        }
        let json = json::from_reader(BufReader::new(File::open(path).unwrap())).unwrap();
        config_files.push((file_path, json));
    }
    assert!(!config_files.is_empty(), "No config files found");
    // Run the caller-supplied check on each config, tagging failures with the file name.
    for (file_path, config_file) in config_files {
        test_config_file(&config_file).context(format!("Failed for file {}", file_path))?;
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    /// CobaltIntHistogram: tests that data added to the CobaltIntHistogram is correctly counted and
    /// bucketed.
    #[test]
    fn test_cobalt_histogram_data() {
        // Create the histogram and verify initial data count is 0.
        // Real buckets cover [50,60), [60,70), [70,80); out-of-range values go to
        // the underflow/overflow buckets.
        let mut hist = CobaltIntHistogram::new(CobaltIntHistogramConfig {
            floor: 50,
            step_size: 10,
            num_buckets: 3,
        });
        assert_eq!(hist.count(), 0);
        // Add some arbitrary values, making sure some do not land on the bucket boundary to further
        // verify the bucketing logic
        hist.add_data(50);
        hist.add_data(65);
        hist.add_data(75);
        hist.add_data(79);
        // Verify the values were counted and bucketed properly
        assert_eq!(hist.count(), 4);
        assert_eq!(
            hist.get_data(),
            vec![
                HistogramBucket { index: 0, count: 0 }, // underflow
                HistogramBucket { index: 1, count: 1 },
                HistogramBucket { index: 2, count: 1 },
                HistogramBucket { index: 3, count: 2 },
                HistogramBucket { index: 4, count: 0 } // overflow
            ]
        );
        // Verify `clear` works as expected
        hist.clear();
        assert_eq!(hist.count(), 0);
        assert_eq!(
            hist.get_data(),
            vec![
                HistogramBucket { index: 0, count: 0 }, // underflow
                HistogramBucket { index: 1, count: 0 },
                HistogramBucket { index: 2, count: 0 },
                HistogramBucket { index: 3, count: 0 },
                HistogramBucket { index: 4, count: 0 }, // overflow
            ]
        );
    }
    /// CobaltIntHistogram: tests that invalid data values are logged in the correct
    /// underflow/overflow buckets.
    #[test]
    fn test_cobalt_histogram_invalid_data() {
        let mut hist = CobaltIntHistogram::new(CobaltIntHistogramConfig {
            floor: 0,
            step_size: 1,
            num_buckets: 2,
        });
        hist.add_data(-2);
        hist.add_data(-1);
        hist.add_data(0);
        hist.add_data(1);
        hist.add_data(2);
        assert_eq!(
            hist.get_data(),
            vec![
                HistogramBucket { index: 0, count: 2 }, // underflow
                HistogramBucket { index: 1, count: 1 },
                HistogramBucket { index: 2, count: 1 },
                HistogramBucket { index: 3, count: 1 } // overflow
            ]
        );
    }
    /// Tests that the `get_current_timestamp` function returns the expected current timestamp.
    #[test]
    fn test_get_current_timestamp() {
        use crate::types::Nanoseconds;
        // The fake-time executor pins `fuchsia_async::Time::now()` to the value we set.
        let exec = fuchsia_async::TestExecutor::new_with_fake_time().unwrap();
        exec.set_fake_time(fuchsia_async::Time::from_nanos(0));
        assert_eq!(get_current_timestamp(), Nanoseconds(0));
        exec.set_fake_time(fuchsia_async::Time::from_nanos(1000));
        assert_eq!(get_current_timestamp(), Nanoseconds(1000));
    }
}
|
{
device_watcher::recursive_wait_and_open_node(
&io_util::open_directory_in_namespace(
"/dev",
OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
)?,
path,
)
.await
}
|
identifier_body
|
utils.rs
|
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/// Logs an error message if the passed in `result` is an error.
#[macro_export]
macro_rules! log_if_err {
($result:expr, $log_prefix:expr) => {
if let Err(e) = $result.as_ref() {
log::error!("{}: {}", $log_prefix, e);
}
};
}
/// Logs an error message if the provided `cond` evaluates to false. Also passes the same expression
/// and message into `debug_assert!`, which will panic if debug assertions are enabled.
#[macro_export]
macro_rules! log_if_false_and_debug_assert {
($cond:expr, $msg:expr) => {
if!($cond) {
log::error!($msg);
debug_assert!($cond, $msg);
}
};
($cond:expr, $fmt:expr, $($arg:tt)+) => {
if!($cond) {
log::error!($fmt, $($arg)+);
debug_assert!($cond, $fmt, $($arg)+);
}
};
}
#[cfg(test)]
mod log_err_with_debug_assert_tests {
use crate::log_if_false_and_debug_assert;
/// Tests that `log_if_false_and_debug_assert` panics for a false expression when debug
/// assertions are enabled.
#[test]
#[should_panic(expected = "this will panic")]
#[cfg(debug_assertions)]
fn test_debug_assert() {
log_if_false_and_debug_assert!(true, "this will not panic");
log_if_false_and_debug_assert!(false, "this will panic");
}
/// Tests that `log_if_false_and_debug_assert` does not panic for a false expression when debug
/// assertions are not enabled.
#[test]
#[cfg(not(debug_assertions))]
fn test_non_debug_assert() {
log_if_false_and_debug_assert!(true, "this will not panic");
log_if_false_and_debug_assert!(false, "this will not panic either");
}
}
/// Export the `connect_to_driver` function to be used throughout the crate.
pub use connect_to_driver::connect_to_driver;
mod connect_to_driver {
use anyhow::{format_err, Error};
use fidl::endpoints::Proxy as _;
use fidl_fuchsia_io::{NodeProxy, OPEN_RIGHT_READABLE, OPEN_RIGHT_WRITABLE};
/// Returns a NodeProxy opened at `path`. The path is guaranteed to exist before the connection
/// is opened.
async fn connect_channel(path: &str) -> Result<NodeProxy, Error> {
device_watcher::recursive_wait_and_open_node(
&io_util::open_directory_in_namespace(
"/dev",
OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
)?,
path,
)
.await
}
/// Connects to the driver at `path`, returning a proxy of the specified type. The path is
/// guaranteed to exist before the connection is opened.
///
/// TODO(fxbug.dev/81378): factor this function out to a common library
pub async fn connect_to_driver<T: fidl::endpoints::ProtocolMarker>(
path: &str,
) -> Result<T::Proxy, Error> {
match path.strip_prefix("/dev/") {
Some(path) => fidl::endpoints::ClientEnd::<T>::new(
connect_channel(path)
.await?
.into_channel()
.map_err(|_| format_err!("into_channel failed on NodeProxy"))?
.into_zx_channel(),
)
.into_proxy()
.map_err(Into::into),
None => Err(format_err!("Path must start with /dev/")),
}
}
#[cfg(test)]
mod tests {
use super::*;
use async_utils::PollExt as _;
use fidl::endpoints::{create_proxy, Proxy, ServerEnd};
use fidl_fuchsia_io::{
DirectoryMarker, NodeMarker, MODE_TYPE_DIRECTORY, OPEN_RIGHT_READABLE,
OPEN_RIGHT_WRITABLE,
};
use fuchsia_async as fasync;
use futures::TryStreamExt as _;
use std::sync::Arc;
use vfs::{
directory::entry::DirectoryEntry, directory::helper::DirectlyMutable,
execution_scope::ExecutionScope, file::vmo::read_only_static, pseudo_directory,
};
fn bind_to_dev(dir: Arc<dyn DirectoryEntry>) {
let (dir_proxy, dir_server) = create_proxy::<DirectoryMarker>().unwrap();
let scope = ExecutionScope::new();
dir.open(
scope,
OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
MODE_TYPE_DIRECTORY,
vfs::path::Path::dot(),
ServerEnd::new(dir_server.into_channel()),
);
let ns = fdio::Namespace::installed().unwrap();
ns.bind("/dev", dir_proxy.into_channel().unwrap().into_zx_channel()).unwrap();
}
/// Tests that `connect_to_driver` returns success for the valid existing path.
#[fasync::run_singlethreaded(test)]
async fn test_connect_to_driver_success() {
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"thermal" => pseudo_directory! {
"000" => read_only_static("string beans")
}
}
});
connect_to_driver::<NodeMarker>("/dev/class/thermal/000").await.unwrap();
}
/// Tests that `connect_to_driver` doesn't return until the required path is added.
#[test]
fn test_connect_to_driver_late_add() {
let mut executor = fasync::TestExecutor::new().unwrap();
let thermal_dir = pseudo_directory! {
"000" => read_only_static("string cheese (cheddar)")
};
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"thermal" => thermal_dir.clone()
}
});
let connect_future =
&mut Box::pin(connect_to_driver::<NodeMarker>("/dev/class/thermal/001"));
// The required path is initially not present
assert!(executor.run_until_stalled(connect_future).is_pending());
// Add the required path
thermal_dir.add_entry("001", read_only_static("string cheese (mozzarella)")).unwrap();
// Verify the wait future now returns successfully
assert!(executor.run_until_stalled(connect_future).unwrap().is_ok());
}
/// Tests that `connect_to_driver` correctly waits even if the required parent directory
/// does not yet exist.
#[test]
fn test_connect_to_driver_nonexistent_parent_dir() {
let mut executor = fasync::TestExecutor::new().unwrap();
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"cpu" => pseudo_directory! {
"000" => read_only_static("shoestring fries")
}
}
});
assert!(executor
.run_until_stalled(&mut Box::pin(connect_to_driver::<NodeMarker>(
"/dev/class/thermal/000"
)))
.is_pending());
}
/// Tests that the proxy returned by `connect_to_driver` is usable for sending a FIDL
/// request.
#[fasync::run_singlethreaded(test)]
async fn test_connect_to_driver_gives_usable_proxy() {
use fidl_fuchsia_device as fdev;
// Create a pseudo directory with a fuchsia.device.Controller FIDL server hosted at
// class/fake_dev_controller and bind it to our /dev
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory! {
"fake_dev_controller" => vfs::service::host(
move |mut stream: fdev::ControllerRequestStream| {
async move {
match stream.try_next().await.unwrap() {
Some(fdev::ControllerRequest::GetCurrentPerformanceState {
responder
}) => {
let _ = responder.send(8);
}
e => panic!("Unexpected request: {:?}", e),
}
}
}
)
}
});
// Connect to the driver and call GetCurrentPerformanceState on it
let result = crate::utils::connect_to_driver::<fdev::ControllerMarker>(
"/dev/class/fake_dev_controller",
)
.await
.expect("Failed to connect to driver")
.get_current_performance_state()
.await
.expect("get_current_performance_state FIDL failed");
// Verify we receive the expected result
assert_eq!(result, 8);
}
/// Verifies that invalid arguments are rejected while valid ones are accepted.
#[fasync::run_singlethreaded(test)]
async fn test_connect_to_driver_valid_path() {
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"cpu" => pseudo_directory! {
"000" => read_only_static("stringtown population 1")
}
}
});
connect_to_driver::<NodeMarker>("/svc/fake_service").await.unwrap_err();
connect_to_driver::<NodeMarker>("/dev/class/cpu/000").await.unwrap();
}
}
}
/// The number of nanoseconds since the system was powered on.
pub fn get_current_timestamp() -> crate::types::Nanoseconds {
crate::types::Nanoseconds(fuchsia_async::Time::now().into_nanos())
}
use fidl_fuchsia_cobalt::HistogramBucket;
/// Convenient wrapper for creating and storing an integer histogram to use with Cobalt.
pub struct CobaltIntHistogram {
/// Underlying histogram data storage.
data: Vec<HistogramBucket>,
/// Number of data values that have been added to the histogram.
data_count: u32,
/// Histogram configuration parameters.
config: CobaltIntHistogramConfig,
}
/// Histogram configuration parameters used by CobaltIntHistogram.
pub struct CobaltIntHistogramConfig {
pub floor: i64,
pub num_buckets: u32,
pub step_size: u32,
}
impl CobaltIntHistogram {
    /// Create a new CobaltIntHistogram.
    pub fn new(config: CobaltIntHistogramConfig) -> Self {
        Self { data: Self::new_vec(config.num_buckets), data_count: 0, config }
    }
    /// Create a new Vec<HistogramBucket> that represents the underlying histogram storage. Two
    /// extra buckets are added for underflow and overflow.
    fn new_vec(num_buckets: u32) -> Vec<HistogramBucket> {
        (0..num_buckets + 2).map(|i| HistogramBucket { index: i, count: 0 }).collect()
    }
    /// Add a data value to the histogram.
    ///
    /// Values below `config.floor` are counted in the underflow bucket (index 0); values at or
    /// above `floor + num_buckets * step_size` are counted in the overflow bucket (last index).
    pub fn add_data(&mut self, n: i64) {
        // Add one to index to account for underflow bucket at index 0.
        // Use euclidean (floor) division: plain `/` truncates toward zero, which would
        // place values in (floor - step_size, floor) into the first real bucket instead
        // of the underflow bucket whenever step_size > 1.
        let mut index = 1 + (n - self.config.floor).div_euclid(self.config.step_size as i64);
        // Clamp index to 0 and self.data.len() - 1, which Cobalt uses for underflow and overflow,
        // respectively
        index = num_traits::clamp(index, 0, self.data.len() as i64 - 1);
        self.data[index as usize].count += 1;
        self.data_count += 1;
    }
    /// Get the number of data elements that have been added to the histogram.
    pub fn count(&self) -> u32 {
        self.data_count
    }
    /// Clear the histogram.
    pub fn clear(&mut self) {
        self.data = Self::new_vec(self.config.num_buckets);
        self.data_count = 0;
    }
    /// Get the underlying Vec<HistogramBucket> of the histogram.
    pub fn get_data(&self) -> Vec<HistogramBucket> {
        self.data.clone()
    }
}
// Finds all of the node config files under the test package's "/config/data" directory. The node
// config files are identified by a suffix of "node_config.json". The function then calls the
// provided `test_config_file` function for each found config file, passing the JSON structure in as
// an argument. The function returns success if each call to `test_config_file` succeeds. Otherwise,
// the first error encountered is returned.
#[cfg(test)]
pub fn test_each_node_config_file(
test_config_file: impl Fn(&Vec<serde_json::Value>) -> Result<(), anyhow::Error>,
) -> Result<(), anyhow::Error> {
use anyhow::Context as _;
use serde_json as json;
use std::fs;
use std::fs::File;
use std::io::BufReader;
let config_files = fs::read_dir("/config/data")
.unwrap()
.filter(|f| f.as_ref().unwrap().file_name().to_str().unwrap().ends_with("node_config.json"))
.map(|f| {
let path = f.unwrap().path();
let file_path = path.to_str().unwrap().to_string();
let json = json::from_reader(BufReader::new(File::open(path).unwrap())).unwrap();
(file_path, json)
})
.collect::<Vec<_>>();
assert!(config_files.len() > 0, "No config files found");
for (file_path, config_file) in config_files {
test_config_file(&config_file).context(format!("Failed for file {}", file_path))?;
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
/// CobaltIntHistogram: tests that data added to the CobaltIntHistogram is correctly counted and
/// bucketed.
#[test]
fn test_cobalt_histogram_data() {
// Create the histogram and verify initial data count is 0
let mut hist = CobaltIntHistogram::new(CobaltIntHistogramConfig {
floor: 50,
step_size: 10,
num_buckets: 3,
});
assert_eq!(hist.count(), 0);
|
// verify the bucketing logic
hist.add_data(50);
hist.add_data(65);
hist.add_data(75);
hist.add_data(79);
// Verify the values were counted and bucketed properly
assert_eq!(hist.count(), 4);
assert_eq!(
hist.get_data(),
vec![
HistogramBucket { index: 0, count: 0 }, // underflow
HistogramBucket { index: 1, count: 1 },
HistogramBucket { index: 2, count: 1 },
HistogramBucket { index: 3, count: 2 },
HistogramBucket { index: 4, count: 0 } // overflow
]
);
// Verify `clear` works as expected
hist.clear();
assert_eq!(hist.count(), 0);
assert_eq!(
hist.get_data(),
vec![
HistogramBucket { index: 0, count: 0 }, // underflow
HistogramBucket { index: 1, count: 0 },
HistogramBucket { index: 2, count: 0 },
HistogramBucket { index: 3, count: 0 },
HistogramBucket { index: 4, count: 0 }, // overflow
]
);
}
/// CobaltIntHistogram: tests that invalid data values are logged in the correct
/// underflow/overflow buckets.
#[test]
fn test_cobalt_histogram_invalid_data() {
let mut hist = CobaltIntHistogram::new(CobaltIntHistogramConfig {
floor: 0,
step_size: 1,
num_buckets: 2,
});
hist.add_data(-2);
hist.add_data(-1);
hist.add_data(0);
hist.add_data(1);
hist.add_data(2);
assert_eq!(
hist.get_data(),
vec![
HistogramBucket { index: 0, count: 2 }, // underflow
HistogramBucket { index: 1, count: 1 },
HistogramBucket { index: 2, count: 1 },
HistogramBucket { index: 3, count: 1 } // overflow
]
);
}
/// Tests that the `get_current_timestamp` function returns the expected current timestamp.
#[test]
fn test_get_current_timestamp() {
use crate::types::Nanoseconds;
let exec = fuchsia_async::TestExecutor::new_with_fake_time().unwrap();
exec.set_fake_time(fuchsia_async::Time::from_nanos(0));
assert_eq!(get_current_timestamp(), Nanoseconds(0));
exec.set_fake_time(fuchsia_async::Time::from_nanos(1000));
assert_eq!(get_current_timestamp(), Nanoseconds(1000));
}
}
|
// Add some arbitrary values, making sure some do not land on the bucket boundary to further
|
random_line_split
|
utils.rs
|
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/// Logs an error message if the passed in `result` is an error.
#[macro_export]
macro_rules! log_if_err {
($result:expr, $log_prefix:expr) => {
if let Err(e) = $result.as_ref() {
log::error!("{}: {}", $log_prefix, e);
}
};
}
/// Logs an error message if the provided `cond` evaluates to false. Also passes the same expression
/// and message into `debug_assert!`, which will panic if debug assertions are enabled.
#[macro_export]
macro_rules! log_if_false_and_debug_assert {
($cond:expr, $msg:expr) => {
if!($cond) {
log::error!($msg);
debug_assert!($cond, $msg);
}
};
($cond:expr, $fmt:expr, $($arg:tt)+) => {
if!($cond) {
log::error!($fmt, $($arg)+);
debug_assert!($cond, $fmt, $($arg)+);
}
};
}
#[cfg(test)]
mod log_err_with_debug_assert_tests {
use crate::log_if_false_and_debug_assert;
/// Tests that `log_if_false_and_debug_assert` panics for a false expression when debug
/// assertions are enabled.
#[test]
#[should_panic(expected = "this will panic")]
#[cfg(debug_assertions)]
fn test_debug_assert() {
log_if_false_and_debug_assert!(true, "this will not panic");
log_if_false_and_debug_assert!(false, "this will panic");
}
/// Tests that `log_if_false_and_debug_assert` does not panic for a false expression when debug
/// assertions are not enabled.
#[test]
#[cfg(not(debug_assertions))]
fn test_non_debug_assert() {
log_if_false_and_debug_assert!(true, "this will not panic");
log_if_false_and_debug_assert!(false, "this will not panic either");
}
}
/// Export the `connect_to_driver` function to be used throughout the crate.
pub use connect_to_driver::connect_to_driver;
mod connect_to_driver {
use anyhow::{format_err, Error};
use fidl::endpoints::Proxy as _;
use fidl_fuchsia_io::{NodeProxy, OPEN_RIGHT_READABLE, OPEN_RIGHT_WRITABLE};
/// Returns a NodeProxy opened at `path`. The path is guaranteed to exist before the connection
/// is opened.
async fn connect_channel(path: &str) -> Result<NodeProxy, Error> {
device_watcher::recursive_wait_and_open_node(
&io_util::open_directory_in_namespace(
"/dev",
OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
)?,
path,
)
.await
}
/// Connects to the driver at `path`, returning a proxy of the specified type. The path is
/// guaranteed to exist before the connection is opened.
///
/// TODO(fxbug.dev/81378): factor this function out to a common library
pub async fn connect_to_driver<T: fidl::endpoints::ProtocolMarker>(
path: &str,
) -> Result<T::Proxy, Error> {
match path.strip_prefix("/dev/") {
Some(path) => fidl::endpoints::ClientEnd::<T>::new(
connect_channel(path)
.await?
.into_channel()
.map_err(|_| format_err!("into_channel failed on NodeProxy"))?
.into_zx_channel(),
)
.into_proxy()
.map_err(Into::into),
None => Err(format_err!("Path must start with /dev/")),
}
}
#[cfg(test)]
mod tests {
use super::*;
use async_utils::PollExt as _;
use fidl::endpoints::{create_proxy, Proxy, ServerEnd};
use fidl_fuchsia_io::{
DirectoryMarker, NodeMarker, MODE_TYPE_DIRECTORY, OPEN_RIGHT_READABLE,
OPEN_RIGHT_WRITABLE,
};
use fuchsia_async as fasync;
use futures::TryStreamExt as _;
use std::sync::Arc;
use vfs::{
directory::entry::DirectoryEntry, directory::helper::DirectlyMutable,
execution_scope::ExecutionScope, file::vmo::read_only_static, pseudo_directory,
};
fn bind_to_dev(dir: Arc<dyn DirectoryEntry>) {
let (dir_proxy, dir_server) = create_proxy::<DirectoryMarker>().unwrap();
let scope = ExecutionScope::new();
dir.open(
scope,
OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
MODE_TYPE_DIRECTORY,
vfs::path::Path::dot(),
ServerEnd::new(dir_server.into_channel()),
);
let ns = fdio::Namespace::installed().unwrap();
ns.bind("/dev", dir_proxy.into_channel().unwrap().into_zx_channel()).unwrap();
}
/// Tests that `connect_to_driver` returns success for the valid existing path.
#[fasync::run_singlethreaded(test)]
async fn test_connect_to_driver_success() {
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"thermal" => pseudo_directory! {
"000" => read_only_static("string beans")
}
}
});
connect_to_driver::<NodeMarker>("/dev/class/thermal/000").await.unwrap();
}
/// Tests that `connect_to_driver` doesn't return until the required path is added.
#[test]
fn test_connect_to_driver_late_add() {
let mut executor = fasync::TestExecutor::new().unwrap();
let thermal_dir = pseudo_directory! {
"000" => read_only_static("string cheese (cheddar)")
};
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"thermal" => thermal_dir.clone()
}
});
let connect_future =
&mut Box::pin(connect_to_driver::<NodeMarker>("/dev/class/thermal/001"));
// The required path is initially not present
assert!(executor.run_until_stalled(connect_future).is_pending());
// Add the required path
thermal_dir.add_entry("001", read_only_static("string cheese (mozzarella)")).unwrap();
// Verify the wait future now returns successfully
assert!(executor.run_until_stalled(connect_future).unwrap().is_ok());
}
/// Tests that `connect_to_driver` correctly waits even if the required parent directory
/// does not yet exist.
#[test]
fn test_connect_to_driver_nonexistent_parent_dir() {
let mut executor = fasync::TestExecutor::new().unwrap();
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"cpu" => pseudo_directory! {
"000" => read_only_static("shoestring fries")
}
}
});
assert!(executor
.run_until_stalled(&mut Box::pin(connect_to_driver::<NodeMarker>(
"/dev/class/thermal/000"
)))
.is_pending());
}
/// Tests that the proxy returned by `connect_to_driver` is usable for sending a FIDL
/// request.
#[fasync::run_singlethreaded(test)]
async fn test_connect_to_driver_gives_usable_proxy() {
use fidl_fuchsia_device as fdev;
// Create a pseudo directory with a fuchsia.device.Controller FIDL server hosted at
// class/fake_dev_controller and bind it to our /dev
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory! {
"fake_dev_controller" => vfs::service::host(
move |mut stream: fdev::ControllerRequestStream| {
async move {
match stream.try_next().await.unwrap() {
Some(fdev::ControllerRequest::GetCurrentPerformanceState {
responder
}) => {
let _ = responder.send(8);
}
e => panic!("Unexpected request: {:?}", e),
}
}
}
)
}
});
// Connect to the driver and call GetCurrentPerformanceState on it
let result = crate::utils::connect_to_driver::<fdev::ControllerMarker>(
"/dev/class/fake_dev_controller",
)
.await
.expect("Failed to connect to driver")
.get_current_performance_state()
.await
.expect("get_current_performance_state FIDL failed");
// Verify we receive the expected result
assert_eq!(result, 8);
}
/// Verifies that invalid arguments are rejected while valid ones are accepted.
#[fasync::run_singlethreaded(test)]
async fn test_connect_to_driver_valid_path() {
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"cpu" => pseudo_directory! {
"000" => read_only_static("stringtown population 1")
}
}
});
connect_to_driver::<NodeMarker>("/svc/fake_service").await.unwrap_err();
connect_to_driver::<NodeMarker>("/dev/class/cpu/000").await.unwrap();
}
}
}
/// The number of nanoseconds since the system was powered on.
pub fn get_current_timestamp() -> crate::types::Nanoseconds {
crate::types::Nanoseconds(fuchsia_async::Time::now().into_nanos())
}
use fidl_fuchsia_cobalt::HistogramBucket;
/// Convenient wrapper for creating and storing an integer histogram to use with Cobalt.
pub struct CobaltIntHistogram {
    /// Underlying histogram data storage. Holds `config.num_buckets + 2`
    /// buckets: index 0 is the underflow bucket and the last index is the
    /// overflow bucket (see `new_vec` / `add_data`).
    data: Vec<HistogramBucket>,
    /// Number of data values that have been added to the histogram.
    data_count: u32,
    /// Histogram configuration parameters.
    config: CobaltIntHistogramConfig,
}
/// Histogram configuration parameters used by CobaltIntHistogram.
pub struct CobaltIntHistogramConfig {
    /// Lowest value captured by the first regular bucket; smaller values land
    /// in the underflow bucket.
    pub floor: i64,
    /// Number of regular buckets (underflow/overflow buckets are added on top).
    pub num_buckets: u32,
    /// Width of each regular bucket.
    pub step_size: u32,
}
impl CobaltIntHistogram {
/// Create a new CobaltIntHistogram.
pub fn new(config: CobaltIntHistogramConfig) -> Self {
Self { data: Self::new_vec(config.num_buckets), data_count: 0, config }
}
/// Create a new Vec<HistogramBucket> that represents the underlying histogram storage. Two
/// extra buckets are added for underflow and overflow.
fn
|
(num_buckets: u32) -> Vec<HistogramBucket> {
(0..num_buckets + 2).map(|i| HistogramBucket { index: i, count: 0 }).collect()
}
/// Add a data value to the histogram.
pub fn add_data(&mut self, n: i64) {
// Add one to index to account for underflow bucket at index 0
let mut index = 1 + (n - self.config.floor) / self.config.step_size as i64;
// Clamp index to 0 and self.data.len() - 1, which Cobalt uses for underflow and overflow,
// respectively
index = num_traits::clamp(index, 0, self.data.len() as i64 - 1);
self.data[index as usize].count += 1;
self.data_count += 1;
}
/// Get the number of data elements that have been added to the histogram.
pub fn count(&self) -> u32 {
self.data_count
}
/// Clear the histogram.
pub fn clear(&mut self) {
self.data = Self::new_vec(self.config.num_buckets);
self.data_count = 0;
}
/// Get the underlying Vec<HistogramBucket> of the histogram.
pub fn get_data(&self) -> Vec<HistogramBucket> {
self.data.clone()
}
}
// Finds all of the node config files under the test package's "/config/data" directory. The node
// config files are identified by a suffix of "node_config.json". The provided `test_config_file`
// function is then invoked once per discovered file with the parsed JSON structure as an
// argument. Returns success if every invocation succeeds; otherwise the first error encountered
// (annotated with the offending file's path) is returned.
#[cfg(test)]
pub fn test_each_node_config_file(
    test_config_file: impl Fn(&Vec<serde_json::Value>) -> Result<(), anyhow::Error>,
) -> Result<(), anyhow::Error> {
    use anyhow::Context as _;
    use serde_json as json;
    use std::fs;
    use std::fs::File;
    use std::io::BufReader;
    // Collect (path, parsed JSON) pairs for every matching config file.
    let mut config_files: Vec<(String, Vec<json::Value>)> = Vec::new();
    for entry in fs::read_dir("/config/data").unwrap() {
        let entry = entry.unwrap();
        if !entry.file_name().to_str().unwrap().ends_with("node_config.json") {
            continue;
        }
        let path = entry.path();
        let file_path = path.to_str().unwrap().to_string();
        let json = json::from_reader(BufReader::new(File::open(path).unwrap())).unwrap();
        config_files.push((file_path, json));
    }
    assert!(config_files.len() > 0, "No config files found");
    for (file_path, config_file) in config_files {
        test_config_file(&config_file).context(format!("Failed for file {}", file_path))?;
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    /// CobaltIntHistogram: tests that data added to the CobaltIntHistogram is correctly counted and
    /// bucketed.
    #[test]
    fn test_cobalt_histogram_data() {
        // Create the histogram and verify initial data count is 0.
        // Regular buckets cover [50, 60), [60, 70), [70, 80).
        let mut hist = CobaltIntHistogram::new(CobaltIntHistogramConfig {
            floor: 50,
            step_size: 10,
            num_buckets: 3,
        });
        assert_eq!(hist.count(), 0);
        // Add some arbitrary values, making sure some do not land on the bucket boundary to further
        // verify the bucketing logic
        hist.add_data(50);
        hist.add_data(65);
        hist.add_data(75);
        hist.add_data(79);
        // Verify the values were counted and bucketed properly
        assert_eq!(hist.count(), 4);
        assert_eq!(
            hist.get_data(),
            vec![
                HistogramBucket { index: 0, count: 0 }, // underflow
                HistogramBucket { index: 1, count: 1 },
                HistogramBucket { index: 2, count: 1 },
                HistogramBucket { index: 3, count: 2 },
                HistogramBucket { index: 4, count: 0 } // overflow
            ]
        );
        // Verify `clear` works as expected: all counts reset, bucket layout kept
        hist.clear();
        assert_eq!(hist.count(), 0);
        assert_eq!(
            hist.get_data(),
            vec![
                HistogramBucket { index: 0, count: 0 }, // underflow
                HistogramBucket { index: 1, count: 0 },
                HistogramBucket { index: 2, count: 0 },
                HistogramBucket { index: 3, count: 0 },
                HistogramBucket { index: 4, count: 0 }, // overflow
            ]
        );
    }
    /// CobaltIntHistogram: tests that invalid data values are logged in the correct
    /// underflow/overflow buckets.
    #[test]
    fn test_cobalt_histogram_invalid_data() {
        // With floor 0, step 1, and two regular buckets, only 0 and 1 are in range.
        let mut hist = CobaltIntHistogram::new(CobaltIntHistogramConfig {
            floor: 0,
            step_size: 1,
            num_buckets: 2,
        });
        hist.add_data(-2);
        hist.add_data(-1);
        hist.add_data(0);
        hist.add_data(1);
        hist.add_data(2);
        assert_eq!(
            hist.get_data(),
            vec![
                HistogramBucket { index: 0, count: 2 }, // underflow
                HistogramBucket { index: 1, count: 1 },
                HistogramBucket { index: 2, count: 1 },
                HistogramBucket { index: 3, count: 1 } // overflow
            ]
        );
    }
    /// Tests that the `get_current_timestamp` function returns the expected current timestamp.
    #[test]
    fn test_get_current_timestamp() {
        use crate::types::Nanoseconds;
        // A fake-time executor makes `fuchsia_async::Time::now()` deterministic.
        let exec = fuchsia_async::TestExecutor::new_with_fake_time().unwrap();
        exec.set_fake_time(fuchsia_async::Time::from_nanos(0));
        assert_eq!(get_current_timestamp(), Nanoseconds(0));
        exec.set_fake_time(fuchsia_async::Time::from_nanos(1000));
        assert_eq!(get_current_timestamp(), Nanoseconds(1000));
    }
}
|
new_vec
|
identifier_name
|
macros.rs
|
/// Prints to [`stdout`][crate::stdout].
///
/// Equivalent to the [`println!`] macro except that a newline is not printed at
/// the end of the message.
///
/// Note that stdout is frequently line-buffered by default so it may be
/// necessary to use [`std::io::Write::flush()`] to ensure the output is emitted
/// immediately.
///
/// **NOTE:** The `print!` macro will lock the standard output on each call. If you call
/// `print!` within a hot loop, this behavior may be the bottleneck of the loop.
/// To avoid this, lock stdout with [`AutoStream::lock`][crate::AutoStream::lock]:
/// ```
/// # #[cfg(feature = "auto")] {
/// use std::io::Write as _;
///
/// let mut lock = anstream::stdout().lock();
/// write!(lock, "hello world").unwrap();
/// # }
/// ```
///
/// Use `print!` only for the primary output of your program. Use
/// [`eprint!`] instead to print error and progress messages.
///
/// # Panics
///
/// Panics if writing to `stdout` fails for any reason **except** broken pipe.
///
/// Writing to non-blocking stdout can cause an error, which will lead
/// this macro to panic.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "auto")] {
/// use std::io::Write as _;
/// use anstream::print;
/// use anstream::stdout;
///
/// print!("this ");
/// print!("will ");
/// print!("be ");
/// print!("on ");
/// print!("the ");
/// print!("same ");
/// print!("line ");
///
/// stdout().flush().unwrap();
///
/// print!("this string has a newline, why not choose println! instead?\n");
///
/// stdout().flush().unwrap();
/// # }
/// ```
#[cfg(feature = "auto")]
#[macro_export]
macro_rules! print {
    ($($arg:tt)*) => {{
        use std::io::Write as _;
        let mut stream = $crate::stdout();
        match ::std::write!(&mut stream, $($arg)*) {
            // Broken pipe is deliberately tolerated so piping into e.g. `head`
            // doesn't abort the program; any other write error is fatal.
            Err(e) if e.kind() != ::std::io::ErrorKind::BrokenPipe => {
                ::std::panic!("failed printing to stdout: {e}");
            }
            Err(_) | Ok(_) => {}
        }
    }};
}
/// Prints to [`stdout`][crate::stdout], with a trailing newline.
///
/// On all platforms the newline is a lone LINE FEED character (`\n`/`U+000A`) —
/// no additional CARRIAGE RETURN (`\r`/`U+000D`) is emitted.
///
/// Accepts the same formatting syntax as [`format!`], writing to standard
/// output instead; see [`std::fmt`] for details.
///
/// **NOTE:** `println!` locks standard output on every call. If invoked inside
/// a hot loop this locking may become the bottleneck; lock once up front with
/// [`AutoStream::lock`][crate::AutoStream::lock] instead:
/// ```
/// # #[cfg(feature = "auto")] {
/// use std::io::Write as _;
///
/// let mut lock = anstream::stdout().lock();
/// writeln!(lock, "hello world").unwrap();
/// # }
/// ```
///
/// Reserve `println!` for the primary output of your program; use
/// [`eprintln!`] for error and progress messages.
///
/// # Panics
///
/// Panics if writing to `stdout` fails for any reason **except** broken pipe.
///
/// Writing to non-blocking stdout can cause an error, which will lead
/// this macro to panic.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "auto")] {
/// use anstream::println;
///
/// println!(); // prints just a newline
/// println!("hello there!");
/// println!("format {} arguments", "some");
/// let local_variable = "some";
/// println!("format {local_variable} arguments");
/// # }
/// ```
#[cfg(feature = "auto")]
#[macro_export]
macro_rules! println {
    () => {
        $crate::print!("\n")
    };
    ($($arg:tt)*) => {{
        use std::io::Write as _;
        let mut stream = $crate::stdout();
        // A broken pipe is tolerated; any other write failure is fatal.
        if let Err(e) = ::std::writeln!(&mut stream, $($arg)*) {
            if e.kind() != ::std::io::ErrorKind::BrokenPipe {
                ::std::panic!("failed printing to stdout: {e}");
            }
        }
    }};
}
/// Prints to [`stderr`][crate::stderr].
///
/// Equivalent to the [`print!`] macro, except that output goes to
/// `stderr` instead of `stdout`. See [`print!`] for
/// example usage.
///
/// Use `eprint!` only for error and progress messages. Use `print!`
/// instead for the primary output of your program.
///
/// # Panics
///
/// Panics if writing to `stderr` fails for any reason **except** broken pipe.
///
/// Writing to non-blocking stderr can cause an error, which will lead
/// this macro to panic.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "auto")] {
/// use anstream::eprint;
///
/// eprint!("Error: Could not complete task");
/// # }
/// ```
#[cfg(feature = "auto")]
#[macro_export]
macro_rules! eprint {
    ($($arg:tt)*) => {{
        use std::io::Write as _;
        let mut stream = $crate::stderr();
        match ::std::write!(&mut stream, $($arg)*) {
            // Broken pipe is tolerated; any other write error is fatal.
            // (Panic message fixed to name stderr, not stdout.)
            Err(e) if e.kind() != ::std::io::ErrorKind::BrokenPipe => {
                ::std::panic!("failed printing to stderr: {e}");
            }
            Err(_) | Ok(_) => {}
        }
    }};
}
/// Prints to [`stderr`][crate::stderr], with a newline.
///
/// Equivalent to the [`println!`] macro, except that output goes to
/// `stderr` instead of `stdout`. See [`println!`] for
/// example usage.
///
/// Use `eprintln!` only for error and progress messages. Use `println!`
/// instead for the primary output of your program.
///
/// # Panics
///
/// Panics if writing to `stderr` fails for any reason **except** broken pipe.
///
/// Writing to non-blocking stderr can cause an error, which will lead
/// this macro to panic.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "auto")] {
/// use anstream::eprintln;
///
/// eprintln!("Error: Could not complete task");
/// # }
/// ```
#[cfg(feature = "auto")]
#[macro_export]
macro_rules! eprintln {
    () => {
        $crate::eprint!("\n")
    };
    ($($arg:tt)*) => {{
        use std::io::Write as _;
        let mut stream = $crate::stderr();
        match ::std::writeln!(&mut stream, $($arg)*) {
            // Broken pipe is tolerated; any other write error is fatal.
            // (Panic message fixed to name stderr, not stdout.)
            Err(e) if e.kind() != ::std::io::ErrorKind::BrokenPipe => {
                ::std::panic!("failed printing to stderr: {e}");
            }
            Err(_) | Ok(_) => {}
        }
    }};
}
/// Panics the current thread.
///
/// This allows a program to terminate immediately and provide feedback
/// to the caller of the program.
///
/// This macro is the perfect way to assert conditions in example code and in
/// tests. `panic!` is closely tied with the `unwrap` method of both
/// [`Option`][ounwrap] and [`Result`][runwrap] enums. Both implementations call
/// `panic!` when they are set to [`None`] or [`Err`] variants.
///
/// When using `panic!()` you can specify a string payload, that is built using
/// the [`format!`] syntax. That payload is used when injecting the panic into
/// the calling Rust thread, causing the thread to panic entirely.
///
/// The behavior of the default `std` hook, i.e. the code that runs directly
/// after the panic is invoked, is to print the message payload to
/// `stderr` along with the file/line/column information of the `panic!()`
/// call. You can override the panic hook using [`std::panic::set_hook()`].
/// Inside the hook a panic can be accessed as a `&dyn Any + Send`,
/// which contains either a `&str` or `String` for regular `panic!()` invocations.
/// To panic with a value of any other type, [`panic_any`] can be used.
///
/// See also the macro [`compile_error!`], for raising errors during compilation.
///
/// # When to use `panic!` vs `Result`
///
/// The Rust language provides two complementary systems for constructing /
/// representing, reporting, propagating, reacting to, and discarding errors. These
/// responsibilities are collectively known as "error handling." `panic!` and
/// `Result` are similar in that they are each the primary interface of their
/// respective error handling systems; however, the meaning these interfaces attach
/// to their errors and the responsibilities they fulfill within their respective
/// error handling systems differ.
///
/// The `panic!` macro is used to construct errors that represent a bug that has
/// been detected in your program. With `panic!` you provide a message that
/// describes the bug and the language then constructs an error with that message,
/// reports it, and propagates it for you.
///
/// `Result` on the other hand is used to wrap other types that represent either
/// the successful result of some computation, `Ok(T)`, or error types that
/// represent an anticipated runtime failure mode of that computation, `Err(E)`.
/// `Result` is used alongside user defined types which represent the various
/// anticipated runtime failure modes that the associated computation could
/// encounter. `Result` must be propagated manually, often with the help of the
/// `?` operator and `Try` trait, and they must be reported manually, often with
/// the help of the `Error` trait.
///
/// For more detailed information about error handling check out the [book] or the
/// [`std::result`] module docs.
///
/// [ounwrap]: Option::unwrap
/// [runwrap]: Result::unwrap
/// [`std::panic::set_hook()`]:../std/panic/fn.set_hook.html
/// [`panic_any`]:../std/panic/fn.panic_any.html
/// [`Box`]:../std/boxed/struct.Box.html
/// [`Any`]: crate::any::Any
/// [`format!`]:../std/macro.format.html
/// [book]:../book/ch09-00-error-handling.html
/// [`std::result`]:../std/result/index.html
///
/// # Current implementation
///
/// If the main thread panics it will terminate all your threads and end your
/// program with code `101`.
///
/// # Examples
///
/// ```should_panic
/// # #![allow(unreachable_code)]
/// use anstream::panic;
/// panic!();
/// panic!("this is a terrible mistake!");
/// panic!("this is a {} {message}", "fancy", message = "message");
/// ```
#[cfg(feature = "auto")]
#[macro_export]
macro_rules! panic {
    () => {
        ::std::panic!()
    };
    ($($arg:tt)*) => {{
        use std::io::Write as _;
        // Decide whether styling should be applied based on the real stderr
        // (where the default panic hook will print), then render the message
        // into an in-memory buffer with that choice applied.
        let panic_stream = std::io::stderr();
        let choice = $crate::AutoStream::choice(&panic_stream);
        let buffer = $crate::Buffer::new();
        let mut stream = $crate::AutoStream::new(buffer, choice);
        // Ignore errors rather than panic
        let _ = ::std::write!(&mut stream, $($arg)*);
        let buffer = stream.into_inner();
        // Should be UTF-8 but not wanting to panic
        let buffer = String::from_utf8_lossy(buffer.as_bytes()).into_owned();
        // Hand the pre-rendered text to the real panic machinery.
        ::std::panic!("{}", buffer)
    }};
}
|
///
/// print!("this ");
/// print!("will ");
/// print!("be ");
|
random_line_split
|
pipe.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use os::windows::prelude::*;
use ffi::OsStr;
use io;
use mem;
use path::Path;
use ptr;
use rand::{self, Rng};
use slice;
use sys::c;
use sys::fs::{File, OpenOptions};
use sys::handle::Handle;
////////////////////////////////////////////////////////////////////////////////
// Anonymous pipes
////////////////////////////////////////////////////////////////////////////////
/// One end (reader or writer) of an anonymous pipe; see `anon_pipe` for how
/// the underlying handle is created.
pub struct AnonPipe {
    inner: Handle,
}
pub fn anon_pipe() -> io::Result<(AnonPipe, AnonPipe)> {
// Note that we specifically do *not* use `CreatePipe` here because
// unfortunately the anonymous pipes returned do not support overlapped
// operations.
//
// Instead, we create a "hopefully unique" name and create a named pipe
// which has overlapped operations enabled.
//
// Once we do this, we connect do it as usual via `CreateFileW`, and then we
// return those reader/writer halves.
unsafe {
let reader;
let mut name;
let mut tries = 0;
loop {
tries += 1;
let key: u64 = rand::thread_rng().gen();
name = format!(r"\\.\pipe\__rust_anonymous_pipe1__.{}.{}",
c::GetCurrentProcessId(),
key);
let wide_name = OsStr::new(&name)
.encode_wide()
.chain(Some(0))
.collect::<Vec<_>>();
let handle = c::CreateNamedPipeW(wide_name.as_ptr(),
c::PIPE_ACCESS_INBOUND |
c::FILE_FLAG_FIRST_PIPE_INSTANCE |
c::FILE_FLAG_OVERLAPPED,
c::PIPE_TYPE_BYTE |
c::PIPE_READMODE_BYTE |
c::PIPE_WAIT |
c::PIPE_REJECT_REMOTE_CLIENTS,
1,
4096,
4096,
0,
ptr::null_mut());
// We pass the FILE_FLAG_FIRST_PIPE_INSTANCE flag above, and we're
// also just doing a best effort at selecting a unique name. If
// ERROR_ACCESS_DENIED is returned then it could mean that we
// accidentally conflicted with an already existing pipe, so we try
// again.
//
// Don't try again too much though as this could also perhaps be a
// legit error.
if handle == c::INVALID_HANDLE_VALUE {
let err = io::Error::last_os_error();
if tries < 10 &&
err.raw_os_error() == Some(c::ERROR_ACCESS_DENIED as i32) {
continue
}
return Err(err)
}
reader = Handle::new(handle);
break
}
// Connect to the named pipe we just created in write-only mode (also
// overlapped for async I/O below).
let mut opts = OpenOptions::new();
opts.write(true);
opts.read(false);
opts.share_mode(0);
opts.attributes(c::FILE_FLAG_OVERLAPPED);
let writer = File::open(Path::new(&name), &opts)?;
let writer = AnonPipe { inner: writer.into_handle() };
Ok((AnonPipe { inner: reader }, AnonPipe { inner: writer.into_handle() }))
}
}
impl AnonPipe {
    /// Borrows the underlying OS handle.
    pub fn handle(&self) -> &Handle { &self.inner }
    /// Consumes the pipe, returning the underlying OS handle.
    pub fn into_handle(self) -> Handle { self.inner }
    /// Reads bytes into `buf`, returning the number of bytes read.
    pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
        self.inner.read(buf)
    }
    /// Reads until EOF, appending to `buf`; returns the total bytes read.
    pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
        self.inner.read_to_end(buf)
    }
    /// Writes bytes from `buf`, returning the number of bytes written.
    pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
        self.inner.write(buf)
    }
}
/// Drains both pipes to EOF concurrently, appending bytes from `p1` into `v1`
/// and bytes from `p2` into `v2`.
pub fn read2(p1: AnonPipe,
             v1: &mut Vec<u8>,
             p2: AnonPipe,
             v2: &mut Vec<u8>) -> io::Result<()> {
    let p1 = p1.into_handle();
    let p2 = p2.into_handle();
    let mut p1 = AsyncPipe::new(p1, v1)?;
    let mut p2 = AsyncPipe::new(p2, v2)?;
    // Wait on both pipes' completion events simultaneously.
    let objs = [p1.event.raw(), p2.event.raw()];
    // In a loop we wait for either pipe's scheduled read operation to complete.
    // If the operation completes with 0 bytes, that means EOF was reached, in
    // which case we just finish out the other pipe entirely.
    //
    // Note that overlapped I/O is in general super unsafe because we have to
    // be careful to ensure that all pointers in play are valid for the entire
    // duration of the I/O operation (where tons of operations can also fail).
    // The destructor for `AsyncPipe` ends up taking care of most of this.
    loop {
        let res = unsafe {
            c::WaitForMultipleObjects(2, objs.as_ptr(), c::FALSE, c::INFINITE)
        };
        if res == c::WAIT_OBJECT_0 {
            // p1's event fired: collect its result; on EOF drain p2 fully.
            if !p1.result()? || !p1.schedule_read()? {
                return p2.finish()
            }
        } else if res == c::WAIT_OBJECT_0 + 1 {
            // p2's event fired: collect its result; on EOF drain p1 fully.
            if !p2.result()? || !p2.schedule_read()? {
                return p1.finish()
            }
        } else {
            return Err(io::Error::last_os_error())
        }
    }
}
/// Drives one overlapped read at a time on `pipe`, committing completed reads
/// into `dst`.
struct AsyncPipe<'a> {
    pipe: Handle,
    /// Manual-reset event signaled when the scheduled overlapped read completes.
    event: Handle,
    overlapped: Box<c::OVERLAPPED>, // needs a stable address
    /// Destination buffer; the kernel writes into its spare capacity and the
    /// bytes are committed via `set_len` in `result`.
    dst: &'a mut Vec<u8>,
    state: State,
}
/// Progress of the current overlapped read.
#[derive(PartialEq, Debug)]
enum State {
    /// No read is scheduled.
    NotReading,
    /// A read was scheduled and has not yet been observed to complete.
    Reading,
    /// A read completed synchronously with this many bytes.
    Read(usize),
}
impl<'a> AsyncPipe<'a> {
    /// Creates an `AsyncPipe` that reads from `pipe` into `dst`.
    fn new(pipe: Handle, dst: &'a mut Vec<u8>) -> io::Result<AsyncPipe<'a>> {
        // Create an event which we'll use to coordinate our overlapped
        // operations, this event will be used in WaitForMultipleObjects
        // and passed as part of the OVERLAPPED handle.
        //
        // Note that we do a somewhat clever thing here by flagging the
        // event as being manually reset and setting it initially to the
        // signaled state. This means that we'll naturally fall through the
        // WaitForMultipleObjects call above for pipes created initially,
        // and the only time an event will go back to "unset" will be once an
        // I/O operation is successfully scheduled (what we want).
        let event = Handle::new_event(true, true)?;
        let mut overlapped: Box<c::OVERLAPPED> = unsafe {
            Box::new(mem::zeroed())
        };
        overlapped.hEvent = event.raw();
        Ok(AsyncPipe {
            pipe: pipe,
            overlapped: overlapped,
            event: event,
            dst: dst,
            state: State::NotReading,
        })
    }
    /// Executes an overlapped read operation.
    ///
    /// Must not currently be reading, and returns whether the pipe is currently
    /// at EOF or not. If the pipe is not at EOF then `result()` must be called
    /// to complete the read later on (may block), but if the pipe is at EOF
    /// then `result()` should not be called as it will just block forever.
    fn schedule_read(&mut self) -> io::Result<bool> {
        assert_eq!(self.state, State::NotReading);
        let amt = unsafe {
            let slice = slice_to_end(self.dst);
            self.pipe.read_overlapped(slice, &mut *self.overlapped)?
        };
        // If this read finished immediately then our overlapped event will
        // remain signaled (it was signaled coming in here) and we'll progress
        // down to the method below.
        //
        // Otherwise the I/O operation is scheduled and the system set our event
        // to not signaled, so we flag ourselves into the reading state and move
        // on.
        self.state = match amt {
            Some(0) => return Ok(false),
            Some(amt) => State::Read(amt),
            None => State::Reading,
        };
        Ok(true)
    }
    /// Waits for the result of the overlapped operation previously executed,
    /// blocking if a scheduled read has not yet completed.
    ///
    /// Return values:
    ///
    /// * `true` - finished any pending read and the pipe is not at EOF (keep
    ///            going)
    /// * `false` - finished any pending read and pipe is at EOF (stop issuing
    ///             reads)
    fn result(&mut self) -> io::Result<bool> {
        let amt = match self.state {
            State::NotReading => return Ok(true),
            State::Reading => {
                // Block until the in-flight overlapped read completes.
                self.pipe.overlapped_result(&mut *self.overlapped, true)?
            }
            State::Read(amt) => amt,
        };
        self.state = State::NotReading;
        unsafe {
            // Commit the bytes the kernel wrote into our spare capacity.
            let len = self.dst.len();
            self.dst.set_len(len + amt);
        }
        Ok(amt != 0)
    }
    /// Finishes out reading this pipe entirely.
    ///
    /// Waits for any pending and scheduled read, then keeps scheduling further
    /// reads until EOF to drain all remaining data.
    fn finish(&mut self) -> io::Result<()> {
        while self.result()? && self.schedule_read()? {
            // ...
        }
        Ok(())
    }
}
impl<'a> Drop for AsyncPipe<'a> {
    fn drop(&mut self) {
        // Nothing to clean up unless an overlapped read is still in flight.
        match self.state {
            State::Reading => {}
            _ => return,
        }
        // If we have a pending read operation, then we have to make sure that
        // it's *done* before we actually drop this type. The kernel requires
        // that the `OVERLAPPED` and buffer pointers are valid for the entire
        // I/O operation.
        //
        // To do that, we call `CancelIo` to cancel any pending operation, and
        // if that succeeds we wait for the overlapped result.
        //
        // If anything here fails, there's not really much we can do, so we leak
        // the buffer/OVERLAPPED pointers to ensure we're at least memory safe.
        if self.pipe.cancel_io().is_err() || self.result().is_err() {
            let buf = mem::replace(self.dst, Vec::new());
            let overlapped = Box::new(unsafe { mem::zeroed() });
            let overlapped = mem::replace(&mut self.overlapped, overlapped);
            // Deliberate leak: memory safety is preferred over reclamation here.
            mem::forget((buf, overlapped));
        }
    }
}
/// Returns the spare capacity of `v` (the region past `len`) as a mutable
/// slice, reserving room first so the slice is never empty.
///
/// SAFETY: the returned slice aliases uninitialized memory beyond `v.len()`;
/// callers must only extend `v` with `set_len` after bytes have actually been
/// written (as `AsyncPipe::result` does).
unsafe fn slice_to_end(v: &mut Vec<u8>) -> &mut [u8] {
    if v.capacity() == 0 {
        v.reserve(16);
    }
    if v.capacity() == v.len() {
        // Guarantee at least one writable byte.
        v.reserve(1);
    }
    slice::from_raw_parts_mut(v.as_mut_ptr().offset(v.len() as isize),
                              v.capacity() - v.len())
}
|
result
|
identifier_name
|
pipe.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use os::windows::prelude::*;
use ffi::OsStr;
use io;
use mem;
use path::Path;
use ptr;
use rand::{self, Rng};
use slice;
use sys::c;
use sys::fs::{File, OpenOptions};
use sys::handle::Handle;
////////////////////////////////////////////////////////////////////////////////
// Anonymous pipes
////////////////////////////////////////////////////////////////////////////////
/// One end (reader or writer) of an anonymous pipe; see `anon_pipe` for how
/// the underlying handle is created.
pub struct AnonPipe {
    inner: Handle,
}
pub fn anon_pipe() -> io::Result<(AnonPipe, AnonPipe)> {
// Note that we specifically do *not* use `CreatePipe` here because
// unfortunately the anonymous pipes returned do not support overlapped
// operations.
//
// Instead, we create a "hopefully unique" name and create a named pipe
// which has overlapped operations enabled.
//
// Once we do this, we connect do it as usual via `CreateFileW`, and then we
// return those reader/writer halves.
unsafe {
let reader;
let mut name;
let mut tries = 0;
loop {
tries += 1;
let key: u64 = rand::thread_rng().gen();
name = format!(r"\\.\pipe\__rust_anonymous_pipe1__.{}.{}",
c::GetCurrentProcessId(),
key);
let wide_name = OsStr::new(&name)
.encode_wide()
.chain(Some(0))
.collect::<Vec<_>>();
let handle = c::CreateNamedPipeW(wide_name.as_ptr(),
c::PIPE_ACCESS_INBOUND |
c::FILE_FLAG_FIRST_PIPE_INSTANCE |
c::FILE_FLAG_OVERLAPPED,
c::PIPE_TYPE_BYTE |
c::PIPE_READMODE_BYTE |
c::PIPE_WAIT |
c::PIPE_REJECT_REMOTE_CLIENTS,
1,
4096,
4096,
0,
ptr::null_mut());
// We pass the FILE_FLAG_FIRST_PIPE_INSTANCE flag above, and we're
// also just doing a best effort at selecting a unique name. If
// ERROR_ACCESS_DENIED is returned then it could mean that we
// accidentally conflicted with an already existing pipe, so we try
// again.
//
// Don't try again too much though as this could also perhaps be a
// legit error.
if handle == c::INVALID_HANDLE_VALUE {
let err = io::Error::last_os_error();
if tries < 10 &&
err.raw_os_error() == Some(c::ERROR_ACCESS_DENIED as i32) {
continue
}
return Err(err)
}
reader = Handle::new(handle);
break
}
// Connect to the named pipe we just created in write-only mode (also
// overlapped for async I/O below).
let mut opts = OpenOptions::new();
opts.write(true);
opts.read(false);
opts.share_mode(0);
opts.attributes(c::FILE_FLAG_OVERLAPPED);
let writer = File::open(Path::new(&name), &opts)?;
let writer = AnonPipe { inner: writer.into_handle() };
Ok((AnonPipe { inner: reader }, AnonPipe { inner: writer.into_handle() }))
}
}
impl AnonPipe {
    /// Borrows the underlying OS handle.
    pub fn handle(&self) -> &Handle { &self.inner }
    /// Consumes the pipe, returning the underlying OS handle.
    pub fn into_handle(self) -> Handle { self.inner }
    /// Reads bytes into `buf`, returning the number of bytes read.
    pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
        self.inner.read(buf)
    }
    /// Reads until EOF, appending to `buf`; returns the total bytes read.
    pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
        self.inner.read_to_end(buf)
    }
    /// Writes bytes from `buf`, returning the number of bytes written.
    pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
        self.inner.write(buf)
    }
}
/// Drains both pipes to EOF concurrently, appending bytes from `p1` into `v1`
/// and bytes from `p2` into `v2`.
pub fn read2(p1: AnonPipe,
             v1: &mut Vec<u8>,
             p2: AnonPipe,
             v2: &mut Vec<u8>) -> io::Result<()> {
    let p1 = p1.into_handle();
    let p2 = p2.into_handle();
    let mut p1 = AsyncPipe::new(p1, v1)?;
    let mut p2 = AsyncPipe::new(p2, v2)?;
    // Wait on both pipes' completion events simultaneously.
    let objs = [p1.event.raw(), p2.event.raw()];
    // In a loop we wait for either pipe's scheduled read operation to complete.
    // If the operation completes with 0 bytes, that means EOF was reached, in
    // which case we just finish out the other pipe entirely.
    //
    // Note that overlapped I/O is in general super unsafe because we have to
    // be careful to ensure that all pointers in play are valid for the entire
    // duration of the I/O operation (where tons of operations can also fail).
    // The destructor for `AsyncPipe` ends up taking care of most of this.
    loop {
        let res = unsafe {
            c::WaitForMultipleObjects(2, objs.as_ptr(), c::FALSE, c::INFINITE)
        };
        if res == c::WAIT_OBJECT_0 {
            // p1's event fired: collect its result; on EOF drain p2 fully.
            if !p1.result()? || !p1.schedule_read()? {
                return p2.finish()
            }
        } else if res == c::WAIT_OBJECT_0 + 1 {
            // p2's event fired: collect its result; on EOF drain p1 fully.
            if !p2.result()? || !p2.schedule_read()? {
                return p1.finish()
            }
        } else {
            return Err(io::Error::last_os_error())
        }
    }
}
/// Drives one overlapped read at a time on `pipe`, committing completed reads
/// into `dst`.
struct AsyncPipe<'a> {
    pipe: Handle,
    /// Manual-reset event signaled when the scheduled overlapped read completes.
    event: Handle,
    overlapped: Box<c::OVERLAPPED>, // needs a stable address
    /// Destination buffer; the kernel writes into its spare capacity and the
    /// bytes are committed via `set_len` in `result`.
    dst: &'a mut Vec<u8>,
    state: State,
}
/// Progress of the current overlapped read.
#[derive(PartialEq, Debug)]
enum State {
    /// No read is scheduled.
    NotReading,
    /// A read was scheduled and has not yet been observed to complete.
    Reading,
    /// A read completed synchronously with this many bytes.
    Read(usize),
}
impl<'a> AsyncPipe<'a> {
    /// Creates an `AsyncPipe` that reads from `pipe` into `dst`.
    fn new(pipe: Handle, dst: &'a mut Vec<u8>) -> io::Result<AsyncPipe<'a>> {
        // Create an event which we'll use to coordinate our overlapped
        // operations, this event will be used in WaitForMultipleObjects
        // and passed as part of the OVERLAPPED handle.
        //
        // Note that we do a somewhat clever thing here by flagging the
        // event as being manually reset and setting it initially to the
        // signaled state. This means that we'll naturally fall through the
        // WaitForMultipleObjects call above for pipes created initially,
        // and the only time an event will go back to "unset" will be once an
        // I/O operation is successfully scheduled (what we want).
        let event = Handle::new_event(true, true)?;
        let mut overlapped: Box<c::OVERLAPPED> = unsafe {
            Box::new(mem::zeroed())
        };
        overlapped.hEvent = event.raw();
        Ok(AsyncPipe {
            pipe: pipe,
            overlapped: overlapped,
            event: event,
            dst: dst,
            state: State::NotReading,
        })
    }
    /// Executes an overlapped read operation.
    ///
    /// Must not currently be reading, and returns whether the pipe is currently
    /// at EOF or not. If the pipe is not at EOF then `result()` must be called
    /// to complete the read later on (may block), but if the pipe is at EOF
    /// then `result()` should not be called as it will just block forever.
    fn schedule_read(&mut self) -> io::Result<bool> {
        assert_eq!(self.state, State::NotReading);
        let amt = unsafe {
            let slice = slice_to_end(self.dst);
            self.pipe.read_overlapped(slice, &mut *self.overlapped)?
        };
        // If this read finished immediately then our overlapped event will
        // remain signaled (it was signaled coming in here) and we'll progress
        // down to the method below.
        //
        // Otherwise the I/O operation is scheduled and the system set our event
        // to not signaled, so we flag ourselves into the reading state and move
        // on.
        self.state = match amt {
            Some(0) => return Ok(false),
            Some(amt) => State::Read(amt),
            None => State::Reading,
        };
        Ok(true)
    }
    /// Waits for the result of the overlapped operation previously executed,
    /// blocking if a scheduled read has not yet completed.
    ///
    /// Return values:
    ///
    /// * `true` - finished any pending read and the pipe is not at EOF (keep
    ///            going)
    /// * `false` - finished any pending read and pipe is at EOF (stop issuing
    ///             reads)
    fn result(&mut self) -> io::Result<bool> {
        let amt = match self.state {
            State::NotReading => return Ok(true),
            State::Reading => {
                // Block until the in-flight overlapped read completes.
                self.pipe.overlapped_result(&mut *self.overlapped, true)?
            }
            State::Read(amt) => amt,
        };
        self.state = State::NotReading;
        unsafe {
            // Commit the bytes the kernel wrote into our spare capacity.
            let len = self.dst.len();
            self.dst.set_len(len + amt);
        }
        Ok(amt != 0)
    }
    /// Finishes out reading this pipe entirely.
    ///
    /// Waits for any pending and scheduled read, then keeps scheduling further
    /// reads until EOF to drain all remaining data.
    fn finish(&mut self) -> io::Result<()> {
        while self.result()? && self.schedule_read()? {
            // ...
        }
        Ok(())
    }
}
impl<'a> Drop for AsyncPipe<'a> {
    fn drop(&mut self) {
        // Nothing to clean up unless an overlapped read is still in flight.
        match self.state {
            State::Reading => {}
            _ => return,
        }
        // If we have a pending read operation, then we have to make sure that
        // it's *done* before we actually drop this type. The kernel requires
        // that the `OVERLAPPED` and buffer pointers are valid for the entire
        // I/O operation.
        //
        // To do that, we call `CancelIo` to cancel any pending operation, and
        // if that succeeds we wait for the overlapped result.
        //
        // If anything here fails, there's not really much we can do, so we leak
        // the buffer/OVERLAPPED pointers to ensure we're at least memory safe.
        if self.pipe.cancel_io().is_err() || self.result().is_err() {
            let buf = mem::replace(self.dst, Vec::new());
            let overlapped = Box::new(unsafe { mem::zeroed() });
            let overlapped = mem::replace(&mut self.overlapped, overlapped);
            // Deliberate leak: memory safety is preferred over reclamation here.
            mem::forget((buf, overlapped));
        }
    }
}
/// Returns the uninitialized spare-capacity tail of `v` as a mutable slice,
/// reserving capacity first so the returned slice is never empty.
///
/// # Safety
///
/// The returned slice covers *uninitialized* memory past `v.len()`; the
/// caller must only write through it (never read) until the corresponding
/// bytes are initialized and the length is raised via `set_len`.
unsafe fn slice_to_end(v: &mut Vec<u8>) -> &mut [u8] {
    // Start a fresh vector off with a reasonable chunk of capacity.
    if v.capacity() == 0 {
        v.reserve(16);
    }
    // Always guarantee at least one spare byte so there is room to read into.
    if v.capacity() == v.len() {
        v.reserve(1);
    }
    // SAFETY: `len <= capacity`, so `ptr.add(len)` is in-bounds of the
    // allocation, and the `[len, capacity)` region is owned by `v` and not
    // aliased. (`.add` replaces the old `offset(len as isize)` cast.)
    slice::from_raw_parts_mut(v.as_mut_ptr().add(v.len()),
                              v.capacity() - v.len())
}
|
while self.result()? && self.schedule_read()? {
// ...
}
Ok(())
|
random_line_split
|
pipe.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use os::windows::prelude::*;
use ffi::OsStr;
use io;
use mem;
use path::Path;
use ptr;
use rand::{self, Rng};
use slice;
use sys::c;
use sys::fs::{File, OpenOptions};
use sys::handle::Handle;
////////////////////////////////////////////////////////////////////////////////
// Anonymous pipes
////////////////////////////////////////////////////////////////////////////////
/// One end of an anonymous pipe pair; wraps the underlying OS handle.
pub struct AnonPipe {
    // Owned OS handle; all read/write calls are forwarded to it.
    inner: Handle,
}
/// Creates an anonymous pipe pair, returned as `(reader, writer)`.
pub fn anon_pipe() -> io::Result<(AnonPipe, AnonPipe)> {
    // Note that we specifically do *not* use `CreatePipe` here because
    // unfortunately the anonymous pipes returned do not support overlapped
    // operations.
    //
    // Instead, we create a "hopefully unique" name and create a named pipe
    // which has overlapped operations enabled.
    //
    // Once we do this, we connect to it as usual via `CreateFileW`, and then
    // we return those reader/writer halves.
    unsafe {
        let reader;
        let mut name;
        let mut tries = 0;
        loop {
            tries += 1;
            let key: u64 = rand::thread_rng().gen();
            name = format!(r"\\.\pipe\__rust_anonymous_pipe1__.{}.{}",
                           c::GetCurrentProcessId(),
                           key);
            let wide_name = OsStr::new(&name)
                                  .encode_wide()
                                  .chain(Some(0))
                                  .collect::<Vec<_>>();
            let handle = c::CreateNamedPipeW(wide_name.as_ptr(),
                                             c::PIPE_ACCESS_INBOUND |
                                             c::FILE_FLAG_FIRST_PIPE_INSTANCE |
                                             c::FILE_FLAG_OVERLAPPED,
                                             c::PIPE_TYPE_BYTE |
                                             c::PIPE_READMODE_BYTE |
                                             c::PIPE_WAIT |
                                             c::PIPE_REJECT_REMOTE_CLIENTS,
                                             1,
                                             4096,
                                             4096,
                                             0,
                                             ptr::null_mut());
            // We pass the FILE_FLAG_FIRST_PIPE_INSTANCE flag above, and we're
            // also just doing a best effort at selecting a unique name. If
            // ERROR_ACCESS_DENIED is returned then it could mean that we
            // accidentally conflicted with an already existing pipe, so we try
            // again.
            //
            // Don't try again too much though as this could also perhaps be a
            // legit error.
            if handle == c::INVALID_HANDLE_VALUE {
                let err = io::Error::last_os_error();
                if tries < 10 &&
                   err.raw_os_error() == Some(c::ERROR_ACCESS_DENIED as i32) {
                    continue
                }
                return Err(err)
            }
            reader = Handle::new(handle);
            break
        }
        // Connect to the named pipe we just created in write-only mode (also
        // overlapped for async I/O below).
        let mut opts = OpenOptions::new();
        opts.write(true);
        opts.read(false);
        opts.share_mode(0);
        opts.attributes(c::FILE_FLAG_OVERLAPPED);
        let writer = File::open(Path::new(&name), &opts)?;
        // Fix: the original wrapped `writer` in an `AnonPipe` and then
        // immediately unwrapped it with `into_handle()` only to re-wrap it —
        // wrap the `File`'s handle exactly once instead.
        Ok((AnonPipe { inner: reader }, AnonPipe { inner: writer.into_handle() }))
    }
}
impl AnonPipe {
pub fn handle(&self) -> &Handle
|
pub fn into_handle(self) -> Handle { self.inner }
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
self.inner.read_to_end(buf)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
self.inner.write(buf)
}
}
/// Drains both pipes into `v1`/`v2` concurrently until each reaches EOF.
pub fn read2(p1: AnonPipe,
             v1: &mut Vec<u8>,
             p2: AnonPipe,
             v2: &mut Vec<u8>) -> io::Result<()> {
    let p1 = p1.into_handle();
    let p2 = p2.into_handle();
    let mut p1 = AsyncPipe::new(p1, v1)?;
    let mut p2 = AsyncPipe::new(p2, v2)?;
    // The index into this array corresponds to the WAIT_OBJECT_0 offset
    // returned by WaitForMultipleObjects below.
    let objs = [p1.event.raw(), p2.event.raw()];
    // In a loop we wait for either pipe's scheduled read operation to complete.
    // If the operation completes with 0 bytes, that means EOF was reached, in
    // which case we just finish out the other pipe entirely.
    //
    // Note that overlapped I/O is in general super unsafe because we have to
    // be careful to ensure that all pointers in play are valid for the entire
    // duration of the I/O operation (where tons of operations can also fail).
    // The destructor for `AsyncPipe` ends up taking care of most of this.
    loop {
        let res = unsafe {
            c::WaitForMultipleObjects(2, objs.as_ptr(), c::FALSE, c::INFINITE)
        };
        if res == c::WAIT_OBJECT_0 {
            // p1's event fired: reap its read; on EOF, drain p2 to the end.
            if!p1.result()? ||!p1.schedule_read()? {
                return p2.finish()
            }
        } else if res == c::WAIT_OBJECT_0 + 1 {
            // p2's event fired: reap its read; on EOF, drain p1 to the end.
            if!p2.result()? ||!p2.schedule_read()? {
                return p1.finish()
            }
        } else {
            // Neither object was signaled: surface the OS error.
            return Err(io::Error::last_os_error())
        }
    }
}
/// State for one pipe being drained via overlapped (async) reads.
struct AsyncPipe<'a> {
    // Handle the overlapped reads are issued against.
    pipe: Handle,
    // Manual-reset event stored in `overlapped.hEvent` (see `new`).
    event: Handle,
    overlapped: Box<c::OVERLAPPED>, // needs a stable address
    // Destination buffer; reads land in its spare capacity.
    dst: &'a mut Vec<u8>,
    state: State,
}
#[derive(PartialEq, Debug)]
enum State {
    // No read is scheduled.
    NotReading,
    // An overlapped read is in flight; `result()` must reap it.
    Reading,
    // A read completed synchronously with this many bytes.
    Read(usize),
}
impl<'a> AsyncPipe<'a> {
    fn new(pipe: Handle, dst: &'a mut Vec<u8>) -> io::Result<AsyncPipe<'a>> {
        // Create an event which we'll use to coordinate our overlapped
        // operations, this event will be used in WaitForMultipleObjects
        // and passed as part of the OVERLAPPED handle.
        //
        // Note that we do a somewhat clever thing here by flagging the
        // event as being manually reset and setting it initially to the
        // signaled state. This means that we'll naturally fall through the
        // WaitForMultipleObjects call above for pipes created initially,
        // and the only time an event will go back to "unset" will be once an
        // I/O operation is successfully scheduled (what we want).
        let event = Handle::new_event(true, true)?;
        let mut overlapped: Box<c::OVERLAPPED> = unsafe {
            // All-zero is a valid initial state for the C `OVERLAPPED` struct.
            Box::new(mem::zeroed())
        };
        overlapped.hEvent = event.raw();
        Ok(AsyncPipe {
            pipe: pipe,
            overlapped: overlapped,
            event: event,
            dst: dst,
            state: State::NotReading,
        })
    }
    /// Executes an overlapped read operation.
    ///
    /// Must not currently be reading, and returns whether the pipe is currently
    /// at EOF or not. If the pipe is not at EOF then `result()` must be called
    /// to complete the read later on (may block), but if the pipe is at EOF
    /// then `result()` should not be called as it will just block forever.
    fn schedule_read(&mut self) -> io::Result<bool> {
        assert_eq!(self.state, State::NotReading);
        let amt = unsafe {
            // Read into the vector's spare capacity; `result()` extends the
            // vector's length once the byte count is known.
            let slice = slice_to_end(self.dst);
            self.pipe.read_overlapped(slice, &mut *self.overlapped)?
        };
        // If this read finished immediately then our overlapped event will
        // remain signaled (it was signaled coming in here) and we'll progress
        // down to the method below.
        //
        // Otherwise the I/O operation is scheduled and the system set our event
        // to not signaled, so we flag ourselves into the reading state and move
        // on.
        self.state = match amt {
            // Zero bytes read synchronously: the pipe is at EOF.
            Some(0) => return Ok(false),
            Some(amt) => State::Read(amt),
            None => State::Reading,
        };
        Ok(true)
    }
    /// Wait for the result of the overlapped operation previously executed.
    ///
    /// If a read is in flight, blocks until it completes (the `true` passed
    /// to `overlapped_result` requests a blocking wait).
    ///
    /// Return values:
    ///
    /// * `true` - finished any pending read and the pipe is not at EOF (keep
    ///            going)
    /// * `false` - finished any pending read and pipe is at EOF (stop issuing
    ///             reads)
    fn result(&mut self) -> io::Result<bool> {
        let amt = match self.state {
            // Nothing pending: trivially "keep going".
            State::NotReading => return Ok(true),
            State::Reading => {
                self.pipe.overlapped_result(&mut *self.overlapped, true)?
            }
            // The read already completed synchronously in `schedule_read`.
            State::Read(amt) => amt,
        };
        self.state = State::NotReading;
        unsafe {
            // The kernel wrote `amt` bytes into the spare capacity handed out
            // by `slice_to_end`, so extending the length is sound.
            let len = self.dst.len();
            self.dst.set_len(len + amt);
        }
        Ok(amt!= 0)
    }
    /// Finishes out reading this pipe entirely.
    ///
    /// Waits for any pending and scheduled read, then keeps scheduling fresh
    /// reads until the pipe reports EOF.
    fn finish(&mut self) -> io::Result<()> {
        while self.result()? && self.schedule_read()? {
            // all the work happens in the loop condition
        }
        Ok(())
    }
}
impl<'a> Drop for AsyncPipe<'a> {
    fn drop(&mut self) {
        // Only a pipe with an in-flight overlapped read needs cleanup.
        match self.state {
            State::Reading => {}
            _ => return,
        }
        // If we have a pending read operation, then we have to make sure that
        // it's *done* before we actually drop this type. The kernel requires
        // that the `OVERLAPPED` and buffer pointers are valid for the entire
        // I/O operation.
        //
        // To do that, we call `CancelIo` to cancel any pending operation, and
        // if that succeeds we wait for the overlapped result.
        //
        // If anything here fails, there's not really much we can do, so we leak
        // the buffer/OVERLAPPED pointers to ensure we're at least memory safe.
        if self.pipe.cancel_io().is_err() || self.result().is_err() {
            let buf = mem::replace(self.dst, Vec::new());
            // Swap a fresh zeroed OVERLAPPED into `self` so the in-use one
            // can be leaked (the kernel may still write through it).
            let overlapped = Box::new(unsafe { mem::zeroed() });
            let overlapped = mem::replace(&mut self.overlapped, overlapped);
            mem::forget((buf, overlapped));
        }
    }
}
/// Returns the uninitialized spare-capacity tail of `v` as a mutable slice,
/// reserving capacity first so the returned slice is never empty.
///
/// # Safety
///
/// The returned slice covers *uninitialized* memory past `v.len()`; the
/// caller must only write through it (never read) until the corresponding
/// bytes are initialized and the length is raised via `set_len`.
unsafe fn slice_to_end(v: &mut Vec<u8>) -> &mut [u8] {
    // Start a fresh vector off with a reasonable chunk of capacity.
    if v.capacity() == 0 {
        v.reserve(16);
    }
    // Always guarantee at least one spare byte so there is room to read into.
    if v.capacity() == v.len() {
        v.reserve(1);
    }
    // SAFETY: `len <= capacity`, so `ptr.add(len)` is in-bounds of the
    // allocation, and the `[len, capacity)` region is owned by `v` and not
    // aliased. (`.add` replaces the old `offset(len as isize)` cast.)
    slice::from_raw_parts_mut(v.as_mut_ptr().add(v.len()),
                              v.capacity() - v.len())
}
|
{ &self.inner }
|
identifier_body
|
register.rs
|
//! Box registers
use crate::mir::constant::Constant;
use crate::mir::expr::Expr;
use crate::serialization::sigma_byte_reader::SigmaByteRead;
use crate::serialization::sigma_byte_writer::SigmaByteWrite;
use crate::serialization::SigmaParsingError;
use crate::serialization::SigmaSerializable;
use crate::serialization::SigmaSerializationError;
use crate::serialization::SigmaSerializeResult;
use ergo_chain_types::Base16EncodedBytes;
use std::convert::TryInto;
use std::{collections::HashMap, convert::TryFrom};
use thiserror::Error;
mod id;
pub use id::*;
mod value;
pub use value::*;
/// Stores non-mandatory registers for the box
#[derive(PartialEq, Eq, Debug, Clone)]
#[cfg_attr(feature = "json", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
    feature = "json",
    serde(
        into = "HashMap<NonMandatoryRegisterId, ergo_chain_types::Base16EncodedBytes>",
        try_from = "HashMap<NonMandatoryRegisterId, crate::chain::json::ergo_box::ConstantHolder>"
    )
)]
// Values are stored densely in id order: element 0 is R4, element 1 is R5, etc.
pub struct NonMandatoryRegisters(Vec<RegisterValue>);
impl NonMandatoryRegisters {
    /// Maximum number of non-mandatory registers
    pub const MAX_SIZE: usize = NonMandatoryRegisterId::NUM_REGS;

    /// Empty non-mandatory registers
    pub fn empty() -> NonMandatoryRegisters {
        NonMandatoryRegisters(vec![])
    }

    /// Create new from map, failing when the map is too large or not densely
    /// packed (validation is delegated to the `TryFrom<HashMap<..>>` impl).
    pub fn new(
        regs: HashMap<NonMandatoryRegisterId, Constant>,
    ) -> Result<NonMandatoryRegisters, NonMandatoryRegistersError> {
        NonMandatoryRegisters::try_from(
            regs.into_iter()
                .map(|(k, v)| (k, v.into()))
                .collect::<HashMap<NonMandatoryRegisterId, RegisterValue>>(),
        )
    }

    /// Size of non-mandatory registers set
    pub fn len(&self) -> usize {
        self.0.len()
    }

    /// Return true if non-mandatory registers set is empty
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Get register value (returns None, if there is no value for the given register id)
    pub fn get(&self, reg_id: NonMandatoryRegisterId) -> Option<&RegisterValue> {
        // Fix: register ids start at R4 (== START_INDEX) while values are
        // stored zero-based, so the id must be translated to a vec index —
        // exactly as `get_constant` below does. Indexing with the raw id
        // would look up the wrong slot (or None) for every valid id.
        self.0.get(reg_id as usize - NonMandatoryRegisterId::START_INDEX)
    }

    /// Get register value as a Constant
    /// returns None, if there is no value for the given register id or an error if it's an unparseable
    pub fn get_constant(
        &self,
        reg_id: NonMandatoryRegisterId,
    ) -> Result<Option<Constant>, RegisterValueError> {
        match self
            .0
            .get(reg_id as usize - NonMandatoryRegisterId::START_INDEX)
        {
            Some(rv) => match rv.as_constant() {
                Ok(c) => Ok(Some(c.clone())),
                Err(e) => Err(e),
            },
            None => Ok(None),
        }
    }
}
/// Create new from ordered values (first element will be R4, and so on)
impl TryFrom<Vec<RegisterValue>> for NonMandatoryRegisters {
    type Error = NonMandatoryRegistersError;

    fn try_from(values: Vec<RegisterValue>) -> Result<Self, Self::Error> {
        // Reject sets larger than the number of available registers (R4..).
        match values.len() {
            n if n > NonMandatoryRegisters::MAX_SIZE => {
                Err(NonMandatoryRegistersError::InvalidSize(n))
            }
            _ => Ok(NonMandatoryRegisters(values)),
        }
    }
}
impl TryFrom<Vec<Constant>> for NonMandatoryRegisters {
    type Error = NonMandatoryRegistersError;

    fn try_from(values: Vec<Constant>) -> Result<Self, Self::Error> {
        // Lift every constant into a parsed register value and let the
        // `Vec<RegisterValue>` conversion enforce the size limit.
        let wrapped: Vec<RegisterValue> =
            values.into_iter().map(RegisterValue::Parsed).collect();
        NonMandatoryRegisters::try_from(wrapped)
    }
}
impl SigmaSerializable for NonMandatoryRegisters {
    fn sigma_serialize<W: SigmaByteWrite>(&self, w: &mut W) -> SigmaSerializeResult {
        // Wire format: a one-byte register count followed by each value
        // serialized in storage order (element 0 is R4, element 1 is R5, ...).
        let regs_num = self.len();
        w.put_u8(regs_num as u8)?;
        for (idx, reg_value) in self.0.iter().enumerate() {
            match reg_value {
                RegisterValue::Parsed(c) => c.sigma_serialize(w)?,
                RegisterValue::ParsedTupleExpr(t) => t.to_tuple_expr().sigma_serialize(w)?,
                // Values that failed to parse cannot be round-tripped, so
                // serialization is refused rather than writing bytes that
                // could not be read back.
                RegisterValue::Invalid { bytes, error_msg } => {
                    let bytes_str = base16::encode_lower(bytes);
                    return Err(SigmaSerializationError::NotSupported(format!("unparseable register value at {0:?} (parsing error: {error_msg}) cannot be serialized in the stream (writer), because it cannot be parsed later. Register value as base16-encoded bytes: {bytes_str}", NonMandatoryRegisterId::get_by_zero_index(idx))));
                }
            };
        }
        Ok(())
    }
    fn sigma_parse<R: SigmaByteRead>(r: &mut R) -> Result<Self, SigmaParsingError> {
        let regs_num = r.get_u8()?;
        let mut additional_regs = Vec::with_capacity(regs_num as usize);
        for idx in 0..regs_num {
            // Each register value is encoded as an expression, but only
            // constants and (evaluated) tuples are legal register contents.
            let expr = Expr::sigma_parse(r)?;
            let reg_val = match expr {
                Expr::Const(c) => RegisterValue::Parsed(c),
                Expr::Tuple(t) => {
                    RegisterValue::ParsedTupleExpr(EvaluatedTuple::new(t).map_err(|e| {
                        RegisterValueError::UnexpectedRegisterValue(format!(
                            "error parsing tuple expression from register {0:?}: {e}",
                            RegisterId::try_from(idx)
                        ))
                    })?)
                }
                _ => {
                    return Err(RegisterValueError::UnexpectedRegisterValue(format!(
                        "invalid register ({0:?}) value: {expr:?} (expected Constant or Tuple)",
                        RegisterId::try_from(idx)
                    ))
                    .into())
                }
            };
            additional_regs.push(reg_val);
        }
        // `try_into` re-checks the size bound (the count came from the stream).
        Ok(additional_regs.try_into()?)
    }
}
/// Possible errors when building NonMandatoryRegisters
#[derive(Error, PartialEq, Eq, Clone, Debug)]
pub enum NonMandatoryRegistersError {
    /// Register set has an invalid size (maximum is [`NonMandatoryRegisters::MAX_SIZE`])
    #[error("invalid non-mandatory registers size ({0})")]
    InvalidSize(usize),
    /// Non-mandatory register indexes are not densely packed (a gap was found)
    #[error("registers are not densely packed (register R{0} is missing)")]
    NonDenselyPacked(u8),
}
impl From<NonMandatoryRegisters>
    for HashMap<NonMandatoryRegisterId, ergo_chain_types::Base16EncodedBytes>
{
    /// Serializes each stored value to base16 text, keyed by its register id.
    fn from(v: NonMandatoryRegisters) -> Self {
        v.0.into_iter()
            .enumerate()
            .map(|(i, reg_value)| {
                (
                    NonMandatoryRegisterId::get_by_zero_index(i),
                    // no way of returning an error without writing custom JSON serializer
                    #[allow(clippy::unwrap_used)]
                    // Fix: `®_value` was a mis-encoded `&reg_value` (mojibake
                    // of `&reg` -> `®`); restored the intended borrow.
                    Base16EncodedBytes::new(&reg_value.sigma_serialize_bytes()),
                )
            })
            .collect()
    }
}
impl From<NonMandatoryRegisters> for HashMap<NonMandatoryRegisterId, RegisterValue> {
    fn from(v: NonMandatoryRegisters) -> Self {
        // Re-key each stored value by its register id (element 0 -> R4, ...).
        let mut map = HashMap::new();
        for (zero_idx, reg_val) in v.0.into_iter().enumerate() {
            map.insert(NonMandatoryRegisterId::get_by_zero_index(zero_idx), reg_val);
        }
        map
    }
}
impl TryFrom<HashMap<NonMandatoryRegisterId, RegisterValue>> for NonMandatoryRegisters {
    type Error = NonMandatoryRegistersError;
    fn try_from(
        reg_map: HashMap<NonMandatoryRegisterId, RegisterValue>,
    ) -> Result<Self, Self::Error> {
        let regs_num = reg_map.len();
        if regs_num > NonMandatoryRegisters::MAX_SIZE {
            Err(NonMandatoryRegistersError::InvalidSize(regs_num))
        } else {
            let mut res: Vec<RegisterValue> = vec![];
            // Dense-packing check: the first `regs_num` ids (R4, R5, ...)
            // must all be present in the map; the first gap aborts with
            // `NonDenselyPacked`.
            NonMandatoryRegisterId::REG_IDS
                .iter()
                .take(regs_num)
                .try_for_each(|reg_id| match reg_map.get(reg_id) {
                    Some(v) => Ok(res.push(v.clone())),
                    None => Err(NonMandatoryRegistersError::NonDenselyPacked(*reg_id as u8)),
                })?;
            Ok(NonMandatoryRegisters(res))
        }
    }
|
impl TryFrom<HashMap<NonMandatoryRegisterId, crate::chain::json::ergo_box::ConstantHolder>>
    for NonMandatoryRegisters
{
    type Error = NonMandatoryRegistersError;
    // NOTE(review): other copies of this impl in the file carry a
    // `#[cfg(feature = "json")]` attribute — confirm it is present here too.
    fn try_from(
        value: HashMap<NonMandatoryRegisterId, crate::chain::json::ergo_box::ConstantHolder>,
    ) -> Result<Self, Self::Error> {
        // Convert each JSON holder into a RegisterValue, then reuse the
        // validating HashMap conversion above.
        let cm: HashMap<NonMandatoryRegisterId, RegisterValue> =
            value.into_iter().map(|(k, v)| (k, v.into())).collect();
        NonMandatoryRegisters::try_from(cm)
    }
}
impl From<NonMandatoryRegistersError> for SigmaParsingError {
    fn from(error: NonMandatoryRegistersError) -> Self {
        // Collapse into the generic parsing error, preserving the message.
        let message = error.to_string();
        SigmaParsingError::Misc(message)
    }
}
#[allow(clippy::unwrap_used)]
#[cfg(feature = "arbitrary")]
pub(crate) mod arbitrary {
    use super::*;
    use proptest::{arbitrary::Arbitrary, collection::vec, prelude::*};
    /// Parameters for generating arbitrary `NonMandatoryRegisters`.
    #[derive(Default)]
    pub struct ArbNonMandatoryRegistersParams {
        // When true, the generator may also emit `RegisterValue::Invalid` entries.
        pub allow_unparseable: bool,
    }
    impl Arbitrary for NonMandatoryRegisters {
        type Parameters = ArbNonMandatoryRegistersParams;
        type Strategy = BoxedStrategy<Self>;
        fn arbitrary_with(params: Self::Parameters) -> Self::Strategy {
            // Generate between 0 and NUM_REGS values; `try_from` cannot fail
            // for lengths within that bound, hence the `unwrap`.
            vec(
                if params.allow_unparseable {
                    prop_oneof![
                        any::<Constant>().prop_map(RegisterValue::Parsed),
                        vec(any::<u8>(), 0..100).prop_map({
                            |bytes| RegisterValue::Invalid {
                                bytes,
                                error_msg: "unparseable".to_string(),
                            }
                        })
                    ]
                    .boxed()
                } else {
                    any::<Constant>().prop_map(RegisterValue::Parsed).boxed()
                },
                0..=NonMandatoryRegisterId::NUM_REGS,
            )
            .prop_map(|reg_values| NonMandatoryRegisters::try_from(reg_values).unwrap())
            .boxed()
        }
    }
}
#[allow(clippy::panic)]
#[allow(clippy::unwrap_used)]
#[allow(clippy::expect_used)]
#[cfg(test)]
mod tests {
    use super::*;
    use crate::serialization::sigma_serialize_roundtrip;
    use proptest::prelude::*;

    proptest! {
        // Converting to a HashMap and back must be lossless.
        #[test]
        fn hash_map_roundtrip(regs in any::<NonMandatoryRegisters>()) {
            let hash_map: HashMap<NonMandatoryRegisterId, RegisterValue> = regs.clone().into();
            let regs_from_map = NonMandatoryRegisters::try_from(hash_map);
            prop_assert![regs_from_map.is_ok()];
            prop_assert_eq![regs_from_map.unwrap(), regs];
        }

        // Every key present in the map must be retrievable via get_constant.
        #[test]
        fn get(regs in any::<NonMandatoryRegisters>()) {
            let hash_map: HashMap<NonMandatoryRegisterId, RegisterValue> = regs.clone().into();
            hash_map.keys().try_for_each(|reg_id| {
                // Fix: `®s` was a mis-encoded `&regs` (mojibake of `&reg` -> `®`).
                prop_assert_eq![&regs.get_constant(*reg_id).unwrap().unwrap(), hash_map.get(reg_id).unwrap().as_constant().unwrap()];
                Ok(())
            })?;
        }

        #[test]
        fn reg_id_from_byte(reg_id_byte in 0i8..NonMandatoryRegisterId::END_INDEX as i8) {
            assert!(RegisterId::try_from(reg_id_byte).is_ok());
        }

        // Serialize/parse roundtrip must be lossless.
        #[test]
        fn ser_roundtrip(regs in any::<NonMandatoryRegisters>()) {
            // Fix: `®s` was a mis-encoded `&regs`.
            prop_assert_eq![sigma_serialize_roundtrip(&regs), regs];
        }
    }

    #[test]
    fn test_empty() {
        assert!(NonMandatoryRegisters::empty().is_empty());
    }

    #[test]
    fn test_non_densely_packed_error() {
        let mut hash_map: HashMap<NonMandatoryRegisterId, RegisterValue> = HashMap::new();
        let c: Constant = 1i32.into();
        hash_map.insert(NonMandatoryRegisterId::R4, c.clone().into());
        // gap, missing R5
        hash_map.insert(NonMandatoryRegisterId::R6, c.into());
        assert!(NonMandatoryRegisters::try_from(hash_map).is_err());
    }
}
|
}
#[cfg(feature = "json")]
|
random_line_split
|
register.rs
|
//! Box registers
use crate::mir::constant::Constant;
use crate::mir::expr::Expr;
use crate::serialization::sigma_byte_reader::SigmaByteRead;
use crate::serialization::sigma_byte_writer::SigmaByteWrite;
use crate::serialization::SigmaParsingError;
use crate::serialization::SigmaSerializable;
use crate::serialization::SigmaSerializationError;
use crate::serialization::SigmaSerializeResult;
use ergo_chain_types::Base16EncodedBytes;
use std::convert::TryInto;
use std::{collections::HashMap, convert::TryFrom};
use thiserror::Error;
mod id;
pub use id::*;
mod value;
pub use value::*;
/// Stores non-mandatory registers for the box
#[derive(PartialEq, Eq, Debug, Clone)]
#[cfg_attr(feature = "json", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
feature = "json",
serde(
into = "HashMap<NonMandatoryRegisterId, ergo_chain_types::Base16EncodedBytes>",
try_from = "HashMap<NonMandatoryRegisterId, crate::chain::json::ergo_box::ConstantHolder>"
)
)]
pub struct NonMandatoryRegisters(Vec<RegisterValue>);
impl NonMandatoryRegisters {
/// Maximum number of non-mandatory registers
pub const MAX_SIZE: usize = NonMandatoryRegisterId::NUM_REGS;
/// Empty non-mandatory registers
pub fn empty() -> NonMandatoryRegisters {
NonMandatoryRegisters(vec![])
}
/// Create new from map
pub fn new(
regs: HashMap<NonMandatoryRegisterId, Constant>,
) -> Result<NonMandatoryRegisters, NonMandatoryRegistersError>
|
/// Size of non-mandatory registers set
pub fn len(&self) -> usize {
self.0.len()
}
/// Return true if non-mandatory registers set is empty
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Get register value (returns None, if there is no value for the given register id)
pub fn get(&self, reg_id: NonMandatoryRegisterId) -> Option<&RegisterValue> {
self.0.get(reg_id as usize)
}
/// Get register value as a Constant
/// returns None, if there is no value for the given register id or an error if it's an unparseable
pub fn get_constant(
&self,
reg_id: NonMandatoryRegisterId,
) -> Result<Option<Constant>, RegisterValueError> {
match self
.0
.get(reg_id as usize - NonMandatoryRegisterId::START_INDEX)
{
Some(rv) => match rv.as_constant() {
Ok(c) => Ok(Some(c.clone())),
Err(e) => Err(e),
},
None => Ok(None),
}
}
}
/// Create new from ordered values (first element will be R4, and so on)
impl TryFrom<Vec<RegisterValue>> for NonMandatoryRegisters {
type Error = NonMandatoryRegistersError;
fn try_from(values: Vec<RegisterValue>) -> Result<Self, Self::Error> {
if values.len() > NonMandatoryRegisters::MAX_SIZE {
Err(NonMandatoryRegistersError::InvalidSize(values.len()))
} else {
Ok(NonMandatoryRegisters(values))
}
}
}
impl TryFrom<Vec<Constant>> for NonMandatoryRegisters {
type Error = NonMandatoryRegistersError;
fn try_from(values: Vec<Constant>) -> Result<Self, Self::Error> {
NonMandatoryRegisters::try_from(
values
.into_iter()
.map(RegisterValue::Parsed)
.collect::<Vec<RegisterValue>>(),
)
}
}
impl SigmaSerializable for NonMandatoryRegisters {
fn sigma_serialize<W: SigmaByteWrite>(&self, w: &mut W) -> SigmaSerializeResult {
let regs_num = self.len();
w.put_u8(regs_num as u8)?;
for (idx, reg_value) in self.0.iter().enumerate() {
match reg_value {
RegisterValue::Parsed(c) => c.sigma_serialize(w)?,
RegisterValue::ParsedTupleExpr(t) => t.to_tuple_expr().sigma_serialize(w)?,
RegisterValue::Invalid { bytes, error_msg } => {
let bytes_str = base16::encode_lower(bytes);
return Err(SigmaSerializationError::NotSupported(format!("unparseable register value at {0:?} (parsing error: {error_msg}) cannot be serialized in the stream (writer), because it cannot be parsed later. Register value as base16-encoded bytes: {bytes_str}", NonMandatoryRegisterId::get_by_zero_index(idx))));
}
};
}
Ok(())
}
fn sigma_parse<R: SigmaByteRead>(r: &mut R) -> Result<Self, SigmaParsingError> {
let regs_num = r.get_u8()?;
let mut additional_regs = Vec::with_capacity(regs_num as usize);
for idx in 0..regs_num {
let expr = Expr::sigma_parse(r)?;
let reg_val = match expr {
Expr::Const(c) => RegisterValue::Parsed(c),
Expr::Tuple(t) => {
RegisterValue::ParsedTupleExpr(EvaluatedTuple::new(t).map_err(|e| {
RegisterValueError::UnexpectedRegisterValue(format!(
"error parsing tuple expression from register {0:?}: {e}",
RegisterId::try_from(idx)
))
})?)
}
_ => {
return Err(RegisterValueError::UnexpectedRegisterValue(format!(
"invalid register ({0:?}) value: {expr:?} (expected Constant or Tuple)",
RegisterId::try_from(idx)
))
.into())
}
};
additional_regs.push(reg_val);
}
Ok(additional_regs.try_into()?)
}
}
/// Possible errors when building NonMandatoryRegisters
#[derive(Error, PartialEq, Eq, Clone, Debug)]
pub enum NonMandatoryRegistersError {
/// Set of register has invalid size(maximum [`NonMandatoryRegisters::MAX_SIZE`])
#[error("invalid non-mandatory registers size ({0})")]
InvalidSize(usize),
/// Set of non-mandatory indexes are not densely packed
#[error("registers are not densely packed (register R{0} is missing)")]
NonDenselyPacked(u8),
}
impl From<NonMandatoryRegisters>
    for HashMap<NonMandatoryRegisterId, ergo_chain_types::Base16EncodedBytes>
{
    /// Serializes each stored value to base16 text, keyed by its register id.
    fn from(v: NonMandatoryRegisters) -> Self {
        v.0.into_iter()
            .enumerate()
            .map(|(i, reg_value)| {
                (
                    NonMandatoryRegisterId::get_by_zero_index(i),
                    // no way of returning an error without writing custom JSON serializer
                    #[allow(clippy::unwrap_used)]
                    // Fix: `®_value` was a mis-encoded `&reg_value` (mojibake
                    // of `&reg` -> `®`); restored the intended borrow.
                    Base16EncodedBytes::new(&reg_value.sigma_serialize_bytes()),
                )
            })
            .collect()
    }
}
impl From<NonMandatoryRegisters> for HashMap<NonMandatoryRegisterId, RegisterValue> {
fn from(v: NonMandatoryRegisters) -> Self {
v.0.into_iter()
.enumerate()
.map(|(i, reg_val)| (NonMandatoryRegisterId::get_by_zero_index(i), reg_val))
.collect()
}
}
impl TryFrom<HashMap<NonMandatoryRegisterId, RegisterValue>> for NonMandatoryRegisters {
type Error = NonMandatoryRegistersError;
fn try_from(
reg_map: HashMap<NonMandatoryRegisterId, RegisterValue>,
) -> Result<Self, Self::Error> {
let regs_num = reg_map.len();
if regs_num > NonMandatoryRegisters::MAX_SIZE {
Err(NonMandatoryRegistersError::InvalidSize(regs_num))
} else {
let mut res: Vec<RegisterValue> = vec![];
NonMandatoryRegisterId::REG_IDS
.iter()
.take(regs_num)
.try_for_each(|reg_id| match reg_map.get(reg_id) {
Some(v) => Ok(res.push(v.clone())),
None => Err(NonMandatoryRegistersError::NonDenselyPacked(*reg_id as u8)),
})?;
Ok(NonMandatoryRegisters(res))
}
}
}
#[cfg(feature = "json")]
impl TryFrom<HashMap<NonMandatoryRegisterId, crate::chain::json::ergo_box::ConstantHolder>>
for NonMandatoryRegisters
{
type Error = NonMandatoryRegistersError;
fn try_from(
value: HashMap<NonMandatoryRegisterId, crate::chain::json::ergo_box::ConstantHolder>,
) -> Result<Self, Self::Error> {
let cm: HashMap<NonMandatoryRegisterId, RegisterValue> =
value.into_iter().map(|(k, v)| (k, v.into())).collect();
NonMandatoryRegisters::try_from(cm)
}
}
impl From<NonMandatoryRegistersError> for SigmaParsingError {
fn from(error: NonMandatoryRegistersError) -> Self {
SigmaParsingError::Misc(error.to_string())
}
}
#[allow(clippy::unwrap_used)]
#[cfg(feature = "arbitrary")]
pub(crate) mod arbitrary {
use super::*;
use proptest::{arbitrary::Arbitrary, collection::vec, prelude::*};
#[derive(Default)]
pub struct ArbNonMandatoryRegistersParams {
pub allow_unparseable: bool,
}
impl Arbitrary for NonMandatoryRegisters {
type Parameters = ArbNonMandatoryRegistersParams;
type Strategy = BoxedStrategy<Self>;
fn arbitrary_with(params: Self::Parameters) -> Self::Strategy {
vec(
if params.allow_unparseable {
prop_oneof![
any::<Constant>().prop_map(RegisterValue::Parsed),
vec(any::<u8>(), 0..100).prop_map({
|bytes| RegisterValue::Invalid {
bytes,
error_msg: "unparseable".to_string(),
}
})
]
.boxed()
} else {
any::<Constant>().prop_map(RegisterValue::Parsed).boxed()
},
0..=NonMandatoryRegisterId::NUM_REGS,
)
.prop_map(|reg_values| NonMandatoryRegisters::try_from(reg_values).unwrap())
.boxed()
}
}
}
#[allow(clippy::panic)]
#[allow(clippy::unwrap_used)]
#[allow(clippy::expect_used)]
#[cfg(test)]
mod tests {
    use super::*;
    use crate::serialization::sigma_serialize_roundtrip;
    use proptest::prelude::*;

    proptest! {
        // Converting to a HashMap and back must be lossless.
        #[test]
        fn hash_map_roundtrip(regs in any::<NonMandatoryRegisters>()) {
            let hash_map: HashMap<NonMandatoryRegisterId, RegisterValue> = regs.clone().into();
            let regs_from_map = NonMandatoryRegisters::try_from(hash_map);
            prop_assert![regs_from_map.is_ok()];
            prop_assert_eq![regs_from_map.unwrap(), regs];
        }

        // Every key present in the map must be retrievable via get_constant.
        #[test]
        fn get(regs in any::<NonMandatoryRegisters>()) {
            let hash_map: HashMap<NonMandatoryRegisterId, RegisterValue> = regs.clone().into();
            hash_map.keys().try_for_each(|reg_id| {
                // Fix: `®s` was a mis-encoded `&regs` (mojibake of `&reg` -> `®`).
                prop_assert_eq![&regs.get_constant(*reg_id).unwrap().unwrap(), hash_map.get(reg_id).unwrap().as_constant().unwrap()];
                Ok(())
            })?;
        }

        #[test]
        fn reg_id_from_byte(reg_id_byte in 0i8..NonMandatoryRegisterId::END_INDEX as i8) {
            assert!(RegisterId::try_from(reg_id_byte).is_ok());
        }

        // Serialize/parse roundtrip must be lossless.
        #[test]
        fn ser_roundtrip(regs in any::<NonMandatoryRegisters>()) {
            // Fix: `®s` was a mis-encoded `&regs`.
            prop_assert_eq![sigma_serialize_roundtrip(&regs), regs];
        }
    }

    #[test]
    fn test_empty() {
        assert!(NonMandatoryRegisters::empty().is_empty());
    }

    #[test]
    fn test_non_densely_packed_error() {
        let mut hash_map: HashMap<NonMandatoryRegisterId, RegisterValue> = HashMap::new();
        let c: Constant = 1i32.into();
        hash_map.insert(NonMandatoryRegisterId::R4, c.clone().into());
        // gap, missing R5
        hash_map.insert(NonMandatoryRegisterId::R6, c.into());
        assert!(NonMandatoryRegisters::try_from(hash_map).is_err());
    }
}
|
{
NonMandatoryRegisters::try_from(
regs.into_iter()
.map(|(k, v)| (k, v.into()))
.collect::<HashMap<NonMandatoryRegisterId, RegisterValue>>(),
)
}
|
identifier_body
|
register.rs
|
//! Box registers
use crate::mir::constant::Constant;
use crate::mir::expr::Expr;
use crate::serialization::sigma_byte_reader::SigmaByteRead;
use crate::serialization::sigma_byte_writer::SigmaByteWrite;
use crate::serialization::SigmaParsingError;
use crate::serialization::SigmaSerializable;
use crate::serialization::SigmaSerializationError;
use crate::serialization::SigmaSerializeResult;
use ergo_chain_types::Base16EncodedBytes;
use std::convert::TryInto;
use std::{collections::HashMap, convert::TryFrom};
use thiserror::Error;
mod id;
pub use id::*;
mod value;
pub use value::*;
/// Stores non-mandatory registers for the box
#[derive(PartialEq, Eq, Debug, Clone)]
#[cfg_attr(feature = "json", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
feature = "json",
serde(
into = "HashMap<NonMandatoryRegisterId, ergo_chain_types::Base16EncodedBytes>",
try_from = "HashMap<NonMandatoryRegisterId, crate::chain::json::ergo_box::ConstantHolder>"
)
)]
pub struct NonMandatoryRegisters(Vec<RegisterValue>);
impl NonMandatoryRegisters {
/// Maximum number of non-mandatory registers
pub const MAX_SIZE: usize = NonMandatoryRegisterId::NUM_REGS;
/// Empty non-mandatory registers
pub fn empty() -> NonMandatoryRegisters {
NonMandatoryRegisters(vec![])
}
/// Create new from map
pub fn new(
regs: HashMap<NonMandatoryRegisterId, Constant>,
) -> Result<NonMandatoryRegisters, NonMandatoryRegistersError> {
NonMandatoryRegisters::try_from(
regs.into_iter()
.map(|(k, v)| (k, v.into()))
.collect::<HashMap<NonMandatoryRegisterId, RegisterValue>>(),
)
}
/// Size of non-mandatory registers set
pub fn len(&self) -> usize {
self.0.len()
}
/// Return true if non-mandatory registers set is empty
pub fn
|
(&self) -> bool {
self.0.is_empty()
}
/// Get register value (returns None, if there is no value for the given register id)
pub fn get(&self, reg_id: NonMandatoryRegisterId) -> Option<&RegisterValue> {
self.0.get(reg_id as usize)
}
/// Get register value as a Constant
/// returns None, if there is no value for the given register id or an error if it's an unparseable
pub fn get_constant(
&self,
reg_id: NonMandatoryRegisterId,
) -> Result<Option<Constant>, RegisterValueError> {
match self
.0
.get(reg_id as usize - NonMandatoryRegisterId::START_INDEX)
{
Some(rv) => match rv.as_constant() {
Ok(c) => Ok(Some(c.clone())),
Err(e) => Err(e),
},
None => Ok(None),
}
}
}
/// Create new from ordered values (first element will be R4, and so on)
impl TryFrom<Vec<RegisterValue>> for NonMandatoryRegisters {
type Error = NonMandatoryRegistersError;
fn try_from(values: Vec<RegisterValue>) -> Result<Self, Self::Error> {
if values.len() > NonMandatoryRegisters::MAX_SIZE {
Err(NonMandatoryRegistersError::InvalidSize(values.len()))
} else {
Ok(NonMandatoryRegisters(values))
}
}
}
impl TryFrom<Vec<Constant>> for NonMandatoryRegisters {
type Error = NonMandatoryRegistersError;
fn try_from(values: Vec<Constant>) -> Result<Self, Self::Error> {
NonMandatoryRegisters::try_from(
values
.into_iter()
.map(RegisterValue::Parsed)
.collect::<Vec<RegisterValue>>(),
)
}
}
impl SigmaSerializable for NonMandatoryRegisters {
// Wire format: one byte register count, then each value (Constant or tuple
// expression) serialized in order R4, R5, ...
fn sigma_serialize<W: SigmaByteWrite>(&self, w: &mut W) -> SigmaSerializeResult {
let regs_num = self.len();
w.put_u8(regs_num as u8)?;
for (idx, reg_value) in self.0.iter().enumerate() {
match reg_value {
RegisterValue::Parsed(c) => c.sigma_serialize(w)?,
RegisterValue::ParsedTupleExpr(t) => t.to_tuple_expr().sigma_serialize(w)?,
// An Invalid value could be written but never parsed back, so writing
// it is refused outright rather than producing a corrupt stream.
RegisterValue::Invalid { bytes, error_msg } => {
let bytes_str = base16::encode_lower(bytes);
return Err(SigmaSerializationError::NotSupported(format!("unparseable register value at {0:?} (parsing error: {error_msg}) cannot be serialized in the stream (writer), because it cannot be parsed later. Register value as base16-encoded bytes: {bytes_str}", NonMandatoryRegisterId::get_by_zero_index(idx))));
}
};
}
Ok(())
}
// Parses `regs_num` expressions; only Constant and Tuple forms are accepted
// as register values, anything else is a parsing error.
fn sigma_parse<R: SigmaByteRead>(r: &mut R) -> Result<Self, SigmaParsingError> {
let regs_num = r.get_u8()?;
let mut additional_regs = Vec::with_capacity(regs_num as usize);
for idx in 0..regs_num {
let expr = Expr::sigma_parse(r)?;
let reg_val = match expr {
Expr::Const(c) => RegisterValue::Parsed(c),
Expr::Tuple(t) => {
RegisterValue::ParsedTupleExpr(EvaluatedTuple::new(t).map_err(|e| {
RegisterValueError::UnexpectedRegisterValue(format!(
"error parsing tuple expression from register {0:?}: {e}",
RegisterId::try_from(idx)
))
})?)
}
_ => {
return Err(RegisterValueError::UnexpectedRegisterValue(format!(
"invalid register ({0:?}) value: {expr:?} (expected Constant or Tuple)",
RegisterId::try_from(idx)
))
.into())
}
};
additional_regs.push(reg_val);
}
// Size validation happens in the TryFrom<Vec<RegisterValue>> impl.
Ok(additional_regs.try_into()?)
}
}
/// Possible errors when building NonMandatoryRegisters
#[derive(Error, PartialEq, Eq, Clone, Debug)]
pub enum NonMandatoryRegistersError {
/// Set of register has invalid size(maximum [`NonMandatoryRegisters::MAX_SIZE`])
#[error("invalid non-mandatory registers size ({0})")]
InvalidSize(usize),
/// Set of non-mandatory indexes are not densely packed
// Carries the numeric id of the first missing register (e.g. 5 for R5).
#[error("registers are not densely packed (register R{0} is missing)")]
NonDenselyPacked(u8),
}
/// Conversion to a base16-encoded map for JSON serialization.
impl From<NonMandatoryRegisters>
    for HashMap<NonMandatoryRegisterId, ergo_chain_types::Base16EncodedBytes>
{
    fn from(v: NonMandatoryRegisters) -> Self {
        v.0.into_iter()
            .enumerate()
            .map(|(i, reg_value)| {
                (
                    NonMandatoryRegisterId::get_by_zero_index(i),
                    // no way of returning an error without writing custom JSON serializer
                    #[allow(clippy::unwrap_used)]
                    // was `®_value` (mis-encoded `&reg_value`) in the previous revision
                    Base16EncodedBytes::new(&reg_value.sigma_serialize_bytes()),
                )
            })
            .collect()
    }
}
/// Explode the dense register vec back into an id-keyed map.
impl From<NonMandatoryRegisters> for HashMap<NonMandatoryRegisterId, RegisterValue> {
    fn from(v: NonMandatoryRegisters) -> Self {
        let mut map = HashMap::with_capacity(v.0.len());
        for (i, reg_val) in v.0.into_iter().enumerate() {
            map.insert(NonMandatoryRegisterId::get_by_zero_index(i), reg_val);
        }
        map
    }
}
impl TryFrom<HashMap<NonMandatoryRegisterId, RegisterValue>> for NonMandatoryRegisters {
    type Error = NonMandatoryRegistersError;
    fn try_from(
        reg_map: HashMap<NonMandatoryRegisterId, RegisterValue>,
    ) -> Result<Self, Self::Error> {
        let regs_num = reg_map.len();
        if regs_num > NonMandatoryRegisters::MAX_SIZE {
            return Err(NonMandatoryRegistersError::InvalidSize(regs_num));
        }
        // Density check: with `regs_num` entries, the first `regs_num`
        // register ids (R4..) must all be present in the map.
        let mut res = Vec::with_capacity(regs_num);
        for reg_id in NonMandatoryRegisterId::REG_IDS.iter().take(regs_num) {
            match reg_map.get(reg_id) {
                Some(v) => res.push(v.clone()),
                None => {
                    return Err(NonMandatoryRegistersError::NonDenselyPacked(*reg_id as u8))
                }
            }
        }
        Ok(NonMandatoryRegisters(res))
    }
}
#[cfg(feature = "json")]
// JSON deserialization path: unwrap each ConstantHolder into a RegisterValue
// and reuse the validating map conversion above.
impl TryFrom<HashMap<NonMandatoryRegisterId, crate::chain::json::ergo_box::ConstantHolder>>
for NonMandatoryRegisters
{
type Error = NonMandatoryRegistersError;
fn try_from(
value: HashMap<NonMandatoryRegisterId, crate::chain::json::ergo_box::ConstantHolder>,
) -> Result<Self, Self::Error> {
let cm: HashMap<NonMandatoryRegisterId, RegisterValue> =
value.into_iter().map(|(k, v)| (k, v.into())).collect();
NonMandatoryRegisters::try_from(cm)
}
}
/// Allow `?` on register-building errors inside parsing code.
impl From<NonMandatoryRegistersError> for SigmaParsingError {
    fn from(error: NonMandatoryRegistersError) -> Self {
        let msg = error.to_string();
        SigmaParsingError::Misc(msg)
    }
}
#[allow(clippy::unwrap_used)]
#[cfg(feature = "arbitrary")]
// proptest strategies for generating NonMandatoryRegisters in tests.
pub(crate) mod arbitrary {
use super::*;
use proptest::{arbitrary::Arbitrary, collection::vec, prelude::*};
/// Strategy knobs; `allow_unparseable` also generates Invalid register values.
#[derive(Default)]
pub struct ArbNonMandatoryRegistersParams {
pub allow_unparseable: bool,
}
impl Arbitrary for NonMandatoryRegisters {
type Parameters = ArbNonMandatoryRegistersParams;
type Strategy = BoxedStrategy<Self>;
fn arbitrary_with(params: Self::Parameters) -> Self::Strategy {
vec(
if params.allow_unparseable {
// Mix parsed constants with raw unparseable byte blobs.
prop_oneof![
any::<Constant>().prop_map(RegisterValue::Parsed),
vec(any::<u8>(), 0..100).prop_map({
|bytes| RegisterValue::Invalid {
bytes,
error_msg: "unparseable".to_string(),
}
})
]
.boxed()
} else {
any::<Constant>().prop_map(RegisterValue::Parsed).boxed()
},
// 0..=NUM_REGS elements keeps the vec within MAX_SIZE, so the
// unwrap on try_from below cannot fail.
0..=NonMandatoryRegisterId::NUM_REGS,
)
.prop_map(|reg_values| NonMandatoryRegisters::try_from(reg_values).unwrap())
.boxed()
}
}
}
#[allow(clippy::panic)]
#[allow(clippy::unwrap_used)]
#[allow(clippy::expect_used)]
#[cfg(test)]
mod tests {
    use super::*;
    use crate::serialization::sigma_serialize_roundtrip;
    use proptest::prelude::*;

    proptest! {
        /// Converting to a HashMap and back must be lossless.
        #[test]
        fn hash_map_roundtrip(regs in any::<NonMandatoryRegisters>()) {
            let hash_map: HashMap<NonMandatoryRegisterId, RegisterValue> = regs.clone().into();
            let regs_from_map = NonMandatoryRegisters::try_from(hash_map);
            prop_assert![regs_from_map.is_ok()];
            prop_assert_eq![regs_from_map.unwrap(), regs];
        }

        /// `get_constant` must agree with the map view of the registers.
        #[test]
        fn get(regs in any::<NonMandatoryRegisters>()) {
            let hash_map: HashMap<NonMandatoryRegisterId, RegisterValue> = regs.clone().into();
            hash_map.keys().try_for_each(|reg_id| {
                // was `®s` (mis-encoded `&regs`) in the previous revision
                prop_assert_eq![&regs.get_constant(*reg_id).unwrap().unwrap(), hash_map.get(reg_id).unwrap().as_constant().unwrap()];
                Ok(())
            })?;
        }

        /// Every byte in the valid range must map to a RegisterId.
        #[test]
        fn reg_id_from_byte(reg_id_byte in 0i8..NonMandatoryRegisterId::END_INDEX as i8) {
            assert!(RegisterId::try_from(reg_id_byte).is_ok());
        }

        #[test]
        fn ser_roundtrip(regs in any::<NonMandatoryRegisters>()) {
            // was `®s` (mis-encoded `&regs`) in the previous revision
            prop_assert_eq![sigma_serialize_roundtrip(&regs), regs];
        }
    }

    #[test]
    fn test_empty() {
        assert!(NonMandatoryRegisters::empty().is_empty());
    }

    #[test]
    fn test_non_densely_packed_error() {
        let mut hash_map: HashMap<NonMandatoryRegisterId, RegisterValue> = HashMap::new();
        let c: Constant = 1i32.into();
        hash_map.insert(NonMandatoryRegisterId::R4, c.clone().into());
        // gap, missing R5
        hash_map.insert(NonMandatoryRegisterId::R6, c.into());
        assert!(NonMandatoryRegisters::try_from(hash_map).is_err());
    }
}
|
is_empty
|
identifier_name
|
mod.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/gui/input/mod.rs
//! GUI input managment
#[allow(unused_imports)]
use kernel::prelude::*;
use self::keyboard::KeyCode;
use core::sync::atomic::{Ordering,AtomicUsize,AtomicU8};
use kernel::sync::Mutex;
pub mod keyboard;
pub mod mouse;
// Input events forwarded to the window subsystem.
#[derive(Debug)]
pub enum Event
{
KeyDown(keyboard::KeyCode),
KeyUp(keyboard::KeyCode),
// A "press" of the key: emitted on key-down for normal keys, on key-up for
// modifiers (and only if the modifier was the last key pressed).
KeyFire(keyboard::KeyCode),
Text([u8; 6]), // 6 bytes, as that can fit in a u64 with a 16-bit tag
// (x, y, dx, dy) — absolute cursor position plus the delta that produced it.
MouseMove(u32,u32,i16,i16),
MouseDown(u32,u32,u8),
MouseUp(u32,u32,u8),
// (x, y, button, click count) — emitted on release for click/double-click.
MouseClick(u32,u32, u8, u8),
}
// Left/right instances of one modifier key, tracked as bits 0/1 of an atomic.
struct ModKeyPair(AtomicUsize);
// On-screen cursor; position lives in the video subsystem's CursorHandle.
struct MouseCursor {
graphics_cursor: ::kernel::sync::Mutex<::kernel::metadevs::video::CursorHandle>,
}
// All state for one logical input source (keyboard + mice).
struct InputChannel
{
//caps_active: AtomicBool, // Go DIAF capslock
shift_held: ModKeyPair,
ctrl_held: ModKeyPair,
alt_held: ModKeyPair,
//altgr: ModKeyPair, // AltGr is usually just one... but meh
// Last key pressed; used to decide whether a modifier "fires" on release.
last_key_pressed: AtomicU8,
//active_repeat: AtomicValue<u8>,
//repeat_start: Timestamp,
cursor: MouseCursor,
// TODO: Mutex feels too heavy, but there may be multiple mice on one channel
double_click_info: Mutex<MouseClickInfo>,
}
// Tracks an in-progress click/double-click sequence.
struct MouseClickInfo
{
button: u8, // 0xFF means "no click in progress"
count: u8,
time: ::kernel::time::TickCount,
x: u32,
y: u32,
}
//struct IMEState
//{
// ime_ofs: u8,
// ime_val: u32,
//}
/// Maximum time in kernel ticks between subsequent press/release events for a click/doubleclick
const DOUBLE_CLICK_TIMEOUT: u64 = 500; // 500ms
/// Maximum distance along any axis between press/release before a click is not registered
const MAX_CLICK_MOVE: u32 = 10;
// The single global input channel (statically constructed via const fns).
static MAIN_INPUT: InputChannel = InputChannel::new();
// Module initialisation hook — currently a no-op.
pub fn init() {
//MAIN_INPUT.cursor.
}
// Channel lookup; only one channel exists at present, so the index is ignored.
fn get_channel_by_index(_idx: usize) -> &'static InputChannel {
&MAIN_INPUT
}
impl InputChannel
{
// Const constructor so a channel can live in a `static`; no keys held,
// cursor at the video subsystem's default position.
const fn new() -> InputChannel {
InputChannel {
shift_held: ModKeyPair::new(),
ctrl_held: ModKeyPair::new(),
alt_held: ModKeyPair::new(),
//altgr: ModKeyPair::new(),
cursor: MouseCursor::new(),
last_key_pressed: AtomicU8::new(KeyCode::None as u8),
double_click_info: Mutex::new(MouseClickInfo::new()),
}
}
// Process one raw key event: update modifier state, handle session-switch
// chords, then emit KeyDown/KeyFire/Text/KeyUp events to the window system.
pub fn handle_key(&self, key: keyboard::KeyCode, release: bool)
{
log_trace!("key={:?}, release={}", key, release);
match (release, key)
{
// Maintain key states
(false, KeyCode::RightShift) => self.shift_held.set_r(),
(false, KeyCode::LeftShift) => self.shift_held.set_l(),
(false, KeyCode::RightCtrl) => self.ctrl_held.set_r(),
(false, KeyCode::LeftCtrl) => self.ctrl_held.set_l(),
(false, KeyCode::RightAlt) => self.alt_held.set_r(),
(false, KeyCode::LeftAlt) => self.alt_held.set_l(),
(true, KeyCode::RightShift) => self.shift_held.clear_r(),
(true, KeyCode::LeftShift) => self.shift_held.clear_l(),
(true, KeyCode::RightCtrl) => self.ctrl_held.clear_r(),
(true, KeyCode::LeftCtrl) => self.ctrl_held.clear_l(),
(true, KeyCode::RightAlt) => self.alt_held.clear_r(),
(true, KeyCode::LeftAlt) => self.alt_held.clear_l(),
// Check for session change commands, don't propagate if they fired
// - 'try_change_session' checks for the required modifier keys and permissions
// TODO: Should this be handled by the `windows` module?
(false, KeyCode::Esc) => if self.try_change_session(0) { return ; },
(false, KeyCode::F1) => if self.try_change_session(1) { return ; },
(false, KeyCode::F2) => if self.try_change_session(2) { return ; },
(false, KeyCode::F3) => if self.try_change_session(3) { return ; },
(false, KeyCode::F4) => if self.try_change_session(4) { return ; },
(false, KeyCode::F5) => if self.try_change_session(5) { return ; },
(false, KeyCode::F6) => if self.try_change_session(6) { return ; },
(false, KeyCode::F7) => if self.try_change_session(7) { return ; },
(false, KeyCode::F8) => if self.try_change_session(8) { return ; },
(false, KeyCode::F9) => if self.try_change_session(9) { return ; },
(false, KeyCode::F10) => if self.try_change_session(10) { return ; },
(false, KeyCode::F11) => if self.try_change_session(11) { return ; },
(false, KeyCode::F12) => if self.try_change_session(12) { return ; },
_ => {},
}
// Snapshot the previous key BEFORE storing the new one — the modifier
// "fire on release" logic below compares against it.
let last_key = self.last_key_pressed.load(Ordering::Relaxed);
if!release {
self.last_key_pressed.store(key as u8, Ordering::Relaxed);
super::windows::handle_input(/*self, */Event::KeyDown(key));
}
// Handle fire and text events
if key.is_modifier()
{
// Only fire a modifier on key-up IF they were the last one pressed
// - This allows "Gui" (windows) to fire on key-up while still being used as a modifier
if release && last_key == key as u8
{
super::windows::handle_input( Event::KeyFire(key) );
}
}
else
{
// TODO: Support repetition (of the last non-modifier pressed)
if!release
{
super::windows::handle_input( Event::KeyFire(key) );
// TODO: Should only generate text if no non-shift modifiers are depressed
//if self.enable_input_translation {
let s = self.get_input_string(key);
if s.len() > 0 {
// Copy the (≤6 byte) translation into a fixed buffer for Event::Text.
let mut buf = [0; 6];
buf[.. s.len()].clone_from_slice( s.as_bytes() );
super::windows::handle_input( Event::Text(buf) );
}
//}
}
}
// Send key combination to active active window (via the window subsystem)
if release {
self.last_key_pressed.store(KeyCode::None as u8, Ordering::Relaxed);
super::windows::handle_input(/*self, */Event::KeyUp(key));
}
}
// Absolute cursor placement from normalised (0..=0xFFFF per axis) coordinates,
// e.g. from a tablet or VM pointer device.
pub fn handle_mouse_set(&self, norm_x: u16, norm_y: u16)
{
// Mouse movement, update cursor
let (dx,dy) = self.cursor.set_pos(norm_x, norm_y);
let (x,y) = self.cursor.pos();
// Any movement cancels a pending click/double-click sequence.
self.double_click_info.lock().clear();
super::windows::handle_input(/*self, */Event::MouseMove(x, y, dx as i16, dy as i16));
}
// Relative cursor movement (e.g. from a PS/2-style mouse).
pub fn handle_mouse_move(&self, dx: i16, dy: i16)
{
// Mouse movement, update cursor
self.cursor.move_pos(dx as i32, dy as i32);
let (x,y) = self.cursor.pos();
self.double_click_info.lock().clear();
super::windows::handle_input(/*self, */Event::MouseMove(x, y, dx, dy));
}
// Mouse button press/release: presses arm the double-click tracker, releases
// check it and may emit a MouseClick in addition to the MouseUp.
pub fn handle_mouse_btn(&self, btn: u8, release: bool)
{
let (x,y) = self.cursor.pos();
if release
{
// Released - check the double-click timer
if let Some(ev) = self.double_click_info.lock().check( x,y, btn )
{
super::windows::handle_input(/*self, */ev);
}
super::windows::handle_input(/*self, */Event::MouseUp(x, y, btn));
}
else
{
// Pressed - reset the double-click timer
self.double_click_info.lock().reset(x,y, btn);
super::windows::handle_input(/*self, */Event::MouseDown(x, y, btn));
}
}
// True if either shift key is currently held.
fn shift(&self) -> bool {
self.shift_held.get()
}
// Whether letters should be upper-cased; currently just shift (no capslock).
fn upper(&self) -> bool {
self.shift()
}
fn get_input_string(&self, keycode: KeyCode) -> &str
{
macro_rules! shift { ($s:ident: $lower:expr, $upper:expr) => { if $s.shift() { $upper } else {$lower} }; }
macro_rules! alpha { ($s:ident: $lower:expr, $upper:expr) => { if $s.upper() { $upper } else {$lower} }; }
match keycode
{
KeyCode::A => alpha!(self: "a", "A"),
KeyCode::B => alpha!(self: "b", "B"),
KeyCode::C => alpha!(self: "c", "C"),
KeyCode::D => alpha!(self: "d", "D"),
KeyCode::E => alpha!(self: "e", "E"),
KeyCode::F => alpha!(self: "f", "F"),
KeyCode::G => alpha!(self: "g", "G"),
KeyCode::H => alpha!(self: "h", "H"),
KeyCode::I => alpha!(self: "i", "I"),
KeyCode::J => alpha!(self: "j", "J"),
KeyCode::K => alpha!(self: "k", "K"),
KeyCode::L => alpha!(self: "l", "L"),
KeyCode::M => alpha!(self: "m", "M"),
KeyCode::N => alpha!(self: "n", "N"),
KeyCode::O => alpha!(self: "o", "O"),
KeyCode::P => alpha!(self: "p", "P"),
KeyCode::Q => alpha!(self: "q", "Q"),
KeyCode::R => alpha!(self: "r", "R"),
KeyCode::S => alpha!(self: "s", "S"),
KeyCode::T => alpha!(self: "t", "T"),
KeyCode::U => alpha!(self: "u", "U"),
KeyCode::V => alpha!(self: "v", "V"),
KeyCode::W => alpha!(self: "w", "W"),
KeyCode::X => alpha!(self: "x", "X"),
KeyCode::Y => alpha!(self: "y", "Y"),
KeyCode::Z => alpha!(self: "z", "Z"),
KeyCode::SquareOpen => shift!(self: "[", "{"),
KeyCode::SquareClose => shift!(self: "[", "{"),
KeyCode::Backslash => shift!(self: "\\","|"),
KeyCode::Semicolon => shift!(self: ";", ":"),
KeyCode::Quote => shift!(self: "'", "\""),
KeyCode::Comma => shift!(self: ",", "<"),
KeyCode::Period => shift!(self: ".", ">"),
KeyCode::Slash => shift!(self: "/", "?"),
KeyCode::Kb1 => shift!(self: "1", "!"),
KeyCode::Kb2 => shift!(self: "2", "@"),
KeyCode::Kb3 => shift!(self: "3", "#"),
KeyCode::Kb4 => shift!(self: "4", "$"),
KeyCode::Kb5 => shift!(self: "5", "%"),
KeyCode::Kb6 => shift!(self: "6", "^"),
KeyCode::Kb7 => shift!(self: "7", "&"),
KeyCode::Kb8 => shift!(self: "8", "*"),
KeyCode::Kb9 => shift!(self: "9", "("),
KeyCode::Kb0 => shift!(self: "0", ")"),
KeyCode::Minus => shift!(self: "-", "_"),
KeyCode::Equals => shift!(self: "=", "+"),
KeyCode::Space => " ",
_ => "",
}
}
/// Switch to session `target` if permitted and the Ctrl+Alt chord is held.
/// Returns true when the switch was performed (caller swallows the key).
fn try_change_session(&self, target: usize) -> bool {
    let permitted = self.is_master() && self.ctrl_held.get() && self.alt_held.get();
    if permitted {
        super::windows::switch_active(target);
    }
    permitted
}
/// Whether this channel may switch sessions (always true: single channel).
fn is_master(&self) -> bool { true }
}
impl ModKeyPair {
const fn new() -> ModKeyPair {
ModKeyPair(AtomicUsize::new(0))
}
fn set_l(&self)
|
fn set_r(&self) { self.0.fetch_or(2, Ordering::Relaxed); }
fn clear_l(&self) { self.0.fetch_and(!1, Ordering::Relaxed); }
fn clear_r(&self) { self.0.fetch_and(!2, Ordering::Relaxed); }
fn get(&self) -> bool {
self.0.load(Ordering::Relaxed)!= 0
}
}
impl MouseCursor {
// Const constructor so the cursor can live inside a static InputChannel.
const fn new() -> MouseCursor {
MouseCursor {
graphics_cursor: ::kernel::sync::Mutex::new(::kernel::metadevs::video::CursorHandle::new()),
}
}
// Apply a signed delta to an unsigned coordinate, saturating at 0 and u32::MAX.
fn add_coord(cur: u32, d: i32) -> u32 {
if d < 0 {
u32::saturating_sub(cur, -d as u32)
}
else {
u32::saturating_add(cur, d as u32)
}
}
/// Set cursor position to normalised coordinates
// Returns the (dx, dy) actually moved, for the MouseMove event.
fn set_pos(&self, norm_x: u16, norm_y: u16) -> (i32, i32) {
let mut lh = self.graphics_cursor.lock();
let pos = lh.get_pos();
// Normalised coords are relative to whichever display currently holds
// the cursor; Err also yields a usable rect (fallback display).
let rect = match ::kernel::metadevs::video::get_display_for_pos(pos)
{
Ok(v) => v,
Err(v) => v,
};
// Scale the 16-bit normalised value into the display rect.
let new_pos = ::kernel::metadevs::video::Pos {
x: rect.x() + ((rect.w() as u64 * norm_x as u64) >> 16) as u32,
y: rect.y() + ((rect.h() as u64 * norm_y as u64) >> 16) as u32,
};
lh.set_pos(new_pos);
(
(new_pos.x as i32 - pos.x as i32) as i32,
(new_pos.y as i32 - pos.y as i32) as i32,
)
}
// Relative move; clamps at the coordinate-space edges via add_coord.
fn move_pos(&self, dx: i32, dy: i32) {
let mut lh = self.graphics_cursor.lock();
let mut pos = lh.get_pos();
pos.x = Self::add_coord(pos.x, dx);
pos.y = Self::add_coord(pos.y, dy);
lh.set_pos(pos);
}
// Current cursor position (lock held only for the read).
fn pos(&self) -> (u32,u32) {
let pos = self.graphics_cursor.lock().get_pos();
(pos.x, pos.y)
}
}
impl MouseClickInfo
{
// button == 0xFF encodes "no click sequence in progress".
const fn new() -> MouseClickInfo {
MouseClickInfo {
button: 0xFF, x: 0, y: 0,
time: 0,
count: 0,
}
}
// Abort any in-progress click sequence (e.g. on cursor movement).
fn clear(&mut self)
{
self.button = 0xFF;
}
// Start a new click sequence at (x, y) for `button` (called on press).
fn reset(&mut self, x: u32, y: u32, button: u8)
{
self.button = button;
self.count = 0;
self.x = x;
self.y = y;
self.time = ::kernel::time::ticks();
}
// Called on release: returns a MouseClick event if this release completes a
// click (same button, within the timeout, cursor didn't move too far);
// `count` increments for double/triple clicks, saturating at 0xFF.
fn check(&mut self, x: u32, y: u32, button: u8) -> Option<Event>
{
use kernel::lib::num::abs_diff;
if self.button!= button {
self.clear();
None
}
else if (::kernel::time::ticks() - self.time) > DOUBLE_CLICK_TIMEOUT {
self.clear();
None
}
else if abs_diff(self.x, x) > MAX_CLICK_MOVE || abs_diff(self.y, y) > MAX_CLICK_MOVE {
self.clear();
None
}
else {
// Keep the sequence alive for a potential further click.
self.time = ::kernel::time::ticks();
self.x = x;
self.y = y;
if self.count < 0xFF {
self.count += 1;
}
Some( Event::MouseClick(x, y, button, self.count) )
}
}
}
|
{ self.0.fetch_or(1, Ordering::Relaxed); }
|
identifier_body
|
mod.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/gui/input/mod.rs
//! GUI input managment
#[allow(unused_imports)]
use kernel::prelude::*;
use self::keyboard::KeyCode;
use core::sync::atomic::{Ordering,AtomicUsize,AtomicU8};
use kernel::sync::Mutex;
pub mod keyboard;
pub mod mouse;
#[derive(Debug)]
pub enum Event
{
KeyDown(keyboard::KeyCode),
KeyUp(keyboard::KeyCode),
KeyFire(keyboard::KeyCode),
Text([u8; 6]), // 6 bytes, as that can fit in a u64 with a 16-bit tag
MouseMove(u32,u32,i16,i16),
MouseDown(u32,u32,u8),
MouseUp(u32,u32,u8),
MouseClick(u32,u32, u8, u8),
}
struct ModKeyPair(AtomicUsize);
struct MouseCursor {
graphics_cursor: ::kernel::sync::Mutex<::kernel::metadevs::video::CursorHandle>,
}
struct InputChannel
{
//caps_active: AtomicBool, // Go DIAF capslock
shift_held: ModKeyPair,
ctrl_held: ModKeyPair,
alt_held: ModKeyPair,
//altgr: ModKeyPair, // AltGr is usually just one... but meh
last_key_pressed: AtomicU8,
//active_repeat: AtomicValue<u8>,
//repeat_start: Timestamp,
cursor: MouseCursor,
// TODO: Mutex feels too heavy, but there may be multiple mice on one channel
double_click_info: Mutex<MouseClickInfo>,
}
struct MouseClickInfo
{
button: u8,
count: u8,
time: ::kernel::time::TickCount,
x: u32,
y: u32,
}
//struct IMEState
//{
// ime_ofs: u8,
// ime_val: u32,
//}
/// Maximum time in kernel ticks between subsequent press/release events for a click/doubleclick
const DOUBLE_CLICK_TIMEOUT: u64 = 500; // 500ms
/// Maximum distance along any axis between press/release before a click is not registered
const MAX_CLICK_MOVE: u32 = 10;
static MAIN_INPUT: InputChannel = InputChannel::new();
pub fn init() {
//MAIN_INPUT.cursor.
}
fn get_channel_by_index(_idx: usize) -> &'static InputChannel {
&MAIN_INPUT
}
impl InputChannel
{
const fn new() -> InputChannel {
InputChannel {
shift_held: ModKeyPair::new(),
ctrl_held: ModKeyPair::new(),
alt_held: ModKeyPair::new(),
//altgr: ModKeyPair::new(),
cursor: MouseCursor::new(),
last_key_pressed: AtomicU8::new(KeyCode::None as u8),
double_click_info: Mutex::new(MouseClickInfo::new()),
}
}
pub fn handle_key(&self, key: keyboard::KeyCode, release: bool)
{
log_trace!("key={:?}, release={}", key, release);
match (release, key)
{
// Maintain key states
(false, KeyCode::RightShift) => self.shift_held.set_r(),
(false, KeyCode::LeftShift) => self.shift_held.set_l(),
(false, KeyCode::RightCtrl) => self.ctrl_held.set_r(),
(false, KeyCode::LeftCtrl) => self.ctrl_held.set_l(),
(false, KeyCode::RightAlt) => self.alt_held.set_r(),
(false, KeyCode::LeftAlt) => self.alt_held.set_l(),
(true, KeyCode::RightShift) => self.shift_held.clear_r(),
(true, KeyCode::LeftShift) => self.shift_held.clear_l(),
(true, KeyCode::RightCtrl) => self.ctrl_held.clear_r(),
(true, KeyCode::LeftCtrl) => self.ctrl_held.clear_l(),
(true, KeyCode::RightAlt) => self.alt_held.clear_r(),
(true, KeyCode::LeftAlt) => self.alt_held.clear_l(),
// Check for session change commands, don't propagate if they fired
// - 'try_change_session' checks for the required modifier keys and permissions
// TODO: Should this be handled by the `windows` module?
(false, KeyCode::Esc) => if self.try_change_session(0) { return ; },
(false, KeyCode::F1) => if self.try_change_session(1) { return ; },
(false, KeyCode::F2) => if self.try_change_session(2) { return ; },
(false, KeyCode::F3) => if self.try_change_session(3) { return ; },
(false, KeyCode::F4) => if self.try_change_session(4) { return ; },
(false, KeyCode::F5) => if self.try_change_session(5) { return ; },
(false, KeyCode::F6) => if self.try_change_session(6) { return ; },
(false, KeyCode::F7) => if self.try_change_session(7) { return ; },
(false, KeyCode::F8) => if self.try_change_session(8) { return ; },
(false, KeyCode::F9) => if self.try_change_session(9) { return ; },
(false, KeyCode::F10) => if self.try_change_session(10) { return ; },
(false, KeyCode::F11) => if self.try_change_session(11) { return ; },
(false, KeyCode::F12) => if self.try_change_session(12) { return ; },
_ => {},
}
let last_key = self.last_key_pressed.load(Ordering::Relaxed);
if!release {
self.last_key_pressed.store(key as u8, Ordering::Relaxed);
super::windows::handle_input(/*self, */Event::KeyDown(key));
}
// Handle fire and text events
if key.is_modifier()
{
// Only fire a modifier on key-up IF they were the last one pressed
// - This allows "Gui" (windows) to fire on key-up while still being used as a modifier
if release && last_key == key as u8
{
super::windows::handle_input( Event::KeyFire(key) );
}
}
else
{
// TODO: Support repetition (of the last non-modifier pressed)
if!release
{
super::windows::handle_input( Event::KeyFire(key) );
// TODO: Should only generate text if no non-shift modifiers are depressed
//if self.enable_input_translation {
let s = self.get_input_string(key);
if s.len() > 0 {
let mut buf = [0; 6];
buf[.. s.len()].clone_from_slice( s.as_bytes() );
super::windows::handle_input( Event::Text(buf) );
}
//}
}
}
// Send key combination to active active window (via the window subsystem)
if release {
self.last_key_pressed.store(KeyCode::None as u8, Ordering::Relaxed);
super::windows::handle_input(/*self, */Event::KeyUp(key));
}
}
pub fn handle_mouse_set(&self, norm_x: u16, norm_y: u16)
{
// Mouse movement, update cursor
let (dx,dy) = self.cursor.set_pos(norm_x, norm_y);
let (x,y) = self.cursor.pos();
self.double_click_info.lock().clear();
super::windows::handle_input(/*self, */Event::MouseMove(x, y, dx as i16, dy as i16));
}
pub fn handle_mouse_move(&self, dx: i16, dy: i16)
{
// Mouse movement, update cursor
self.cursor.move_pos(dx as i32, dy as i32);
let (x,y) = self.cursor.pos();
self.double_click_info.lock().clear();
super::windows::handle_input(/*self, */Event::MouseMove(x, y, dx, dy));
}
pub fn handle_mouse_btn(&self, btn: u8, release: bool)
{
let (x,y) = self.cursor.pos();
if release
{
// Released - check the double-click timer
if let Some(ev) = self.double_click_info.lock().check( x,y, btn )
{
super::windows::handle_input(/*self, */ev);
}
super::windows::handle_input(/*self, */Event::MouseUp(x, y, btn));
}
else
{
// Pressed - reset the double-click timer
self.double_click_info.lock().reset(x,y, btn);
super::windows::handle_input(/*self, */Event::MouseDown(x, y, btn));
}
}
fn shift(&self) -> bool {
self.shift_held.get()
}
fn upper(&self) -> bool {
self.shift()
}
fn get_input_string(&self, keycode: KeyCode) -> &str
{
macro_rules! shift { ($s:ident: $lower:expr, $upper:expr) => { if $s.shift() { $upper } else {$lower} }; }
macro_rules! alpha { ($s:ident: $lower:expr, $upper:expr) => { if $s.upper() { $upper } else {$lower} }; }
match keycode
{
KeyCode::A => alpha!(self: "a", "A"),
KeyCode::B => alpha!(self: "b", "B"),
KeyCode::C => alpha!(self: "c", "C"),
KeyCode::D => alpha!(self: "d", "D"),
KeyCode::E => alpha!(self: "e", "E"),
KeyCode::F => alpha!(self: "f", "F"),
KeyCode::G => alpha!(self: "g", "G"),
KeyCode::H => alpha!(self: "h", "H"),
KeyCode::I => alpha!(self: "i", "I"),
KeyCode::J => alpha!(self: "j", "J"),
KeyCode::K => alpha!(self: "k", "K"),
KeyCode::L => alpha!(self: "l", "L"),
KeyCode::M => alpha!(self: "m", "M"),
KeyCode::N => alpha!(self: "n", "N"),
KeyCode::O => alpha!(self: "o", "O"),
KeyCode::P => alpha!(self: "p", "P"),
KeyCode::Q => alpha!(self: "q", "Q"),
KeyCode::R => alpha!(self: "r", "R"),
KeyCode::S => alpha!(self: "s", "S"),
KeyCode::T => alpha!(self: "t", "T"),
KeyCode::U => alpha!(self: "u", "U"),
KeyCode::V => alpha!(self: "v", "V"),
KeyCode::W => alpha!(self: "w", "W"),
KeyCode::X => alpha!(self: "x", "X"),
KeyCode::Y => alpha!(self: "y", "Y"),
KeyCode::Z => alpha!(self: "z", "Z"),
KeyCode::SquareOpen => shift!(self: "[", "{"),
|
KeyCode::Semicolon => shift!(self: ";", ":"),
KeyCode::Quote => shift!(self: "'", "\""),
KeyCode::Comma => shift!(self: ",", "<"),
KeyCode::Period => shift!(self: ".", ">"),
KeyCode::Slash => shift!(self: "/", "?"),
KeyCode::Kb1 => shift!(self: "1", "!"),
KeyCode::Kb2 => shift!(self: "2", "@"),
KeyCode::Kb3 => shift!(self: "3", "#"),
KeyCode::Kb4 => shift!(self: "4", "$"),
KeyCode::Kb5 => shift!(self: "5", "%"),
KeyCode::Kb6 => shift!(self: "6", "^"),
KeyCode::Kb7 => shift!(self: "7", "&"),
KeyCode::Kb8 => shift!(self: "8", "*"),
KeyCode::Kb9 => shift!(self: "9", "("),
KeyCode::Kb0 => shift!(self: "0", ")"),
KeyCode::Minus => shift!(self: "-", "_"),
KeyCode::Equals => shift!(self: "=", "+"),
KeyCode::Space => " ",
_ => "",
}
}
fn try_change_session(&self, target: usize) -> bool {
if self.is_master() && self.ctrl_held.get() && self.alt_held.get() {
super::windows::switch_active(target);
true
}
else {
false
}
}
fn is_master(&self) -> bool { true }
}
impl ModKeyPair {
// Bit 0 = left key, bit 1 = right key; both start released.
const fn new() -> ModKeyPair {
ModKeyPair(AtomicUsize::new(0))
}
fn set_l(&self) { self.0.fetch_or(1, Ordering::Relaxed); }
fn set_r(&self) { self.0.fetch_or(2, Ordering::Relaxed); }
fn clear_l(&self) { self.0.fetch_and(!1, Ordering::Relaxed); }
fn clear_r(&self) { self.0.fetch_and(!2, Ordering::Relaxed); }
// True if either the left or right instance of the key is held.
fn get(&self) -> bool {
self.0.load(Ordering::Relaxed)!= 0
}
}
impl MouseCursor {
const fn new() -> MouseCursor {
MouseCursor {
graphics_cursor: ::kernel::sync::Mutex::new(::kernel::metadevs::video::CursorHandle::new()),
}
}
fn add_coord(cur: u32, d: i32) -> u32 {
if d < 0 {
u32::saturating_sub(cur, -d as u32)
}
else {
u32::saturating_add(cur, d as u32)
}
}
/// Set cursor position to normalised coordinates
fn set_pos(&self, norm_x: u16, norm_y: u16) -> (i32, i32) {
let mut lh = self.graphics_cursor.lock();
let pos = lh.get_pos();
let rect = match ::kernel::metadevs::video::get_display_for_pos(pos)
{
Ok(v) => v,
Err(v) => v,
};
let new_pos = ::kernel::metadevs::video::Pos {
x: rect.x() + ((rect.w() as u64 * norm_x as u64) >> 16) as u32,
y: rect.y() + ((rect.h() as u64 * norm_y as u64) >> 16) as u32,
};
lh.set_pos(new_pos);
(
(new_pos.x as i32 - pos.x as i32) as i32,
(new_pos.y as i32 - pos.y as i32) as i32,
)
}
fn move_pos(&self, dx: i32, dy: i32) {
let mut lh = self.graphics_cursor.lock();
let mut pos = lh.get_pos();
pos.x = Self::add_coord(pos.x, dx);
pos.y = Self::add_coord(pos.y, dy);
lh.set_pos(pos);
}
fn pos(&self) -> (u32,u32) {
let pos = self.graphics_cursor.lock().get_pos();
(pos.x, pos.y)
}
}
impl MouseClickInfo
{
const fn new() -> MouseClickInfo {
MouseClickInfo {
button: 0xFF, x: 0, y: 0,
time: 0,
count: 0,
}
}
fn clear(&mut self)
{
self.button = 0xFF;
}
fn reset(&mut self, x: u32, y: u32, button: u8)
{
self.button = button;
self.count = 0;
self.x = x;
self.y = y;
self.time = ::kernel::time::ticks();
}
fn check(&mut self, x: u32, y: u32, button: u8) -> Option<Event>
{
use kernel::lib::num::abs_diff;
if self.button!= button {
self.clear();
None
}
else if (::kernel::time::ticks() - self.time) > DOUBLE_CLICK_TIMEOUT {
self.clear();
None
}
else if abs_diff(self.x, x) > MAX_CLICK_MOVE || abs_diff(self.y, y) > MAX_CLICK_MOVE {
self.clear();
None
}
else {
self.time = ::kernel::time::ticks();
self.x = x;
self.y = y;
if self.count < 0xFF {
self.count += 1;
}
Some( Event::MouseClick(x, y, button, self.count) )
}
}
}
|
KeyCode::SquareClose => shift!(self: "[", "{"),
KeyCode::Backslash => shift!(self: "\\","|"),
|
random_line_split
|
mod.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/gui/input/mod.rs
//! GUI input managment
#[allow(unused_imports)]
use kernel::prelude::*;
use self::keyboard::KeyCode;
use core::sync::atomic::{Ordering,AtomicUsize,AtomicU8};
use kernel::sync::Mutex;
pub mod keyboard;
pub mod mouse;
#[derive(Debug)]
pub enum Event
{
KeyDown(keyboard::KeyCode),
KeyUp(keyboard::KeyCode),
KeyFire(keyboard::KeyCode),
Text([u8; 6]), // 6 bytes, as that can fit in a u64 with a 16-bit tag
MouseMove(u32,u32,i16,i16),
MouseDown(u32,u32,u8),
MouseUp(u32,u32,u8),
MouseClick(u32,u32, u8, u8),
}
struct ModKeyPair(AtomicUsize);
struct MouseCursor {
graphics_cursor: ::kernel::sync::Mutex<::kernel::metadevs::video::CursorHandle>,
}
struct InputChannel
{
//caps_active: AtomicBool, // Go DIAF capslock
shift_held: ModKeyPair,
ctrl_held: ModKeyPair,
alt_held: ModKeyPair,
//altgr: ModKeyPair, // AltGr is usually just one... but meh
last_key_pressed: AtomicU8,
//active_repeat: AtomicValue<u8>,
//repeat_start: Timestamp,
cursor: MouseCursor,
// TODO: Mutex feels too heavy, but there may be multiple mice on one channel
double_click_info: Mutex<MouseClickInfo>,
}
struct MouseClickInfo
{
button: u8,
count: u8,
time: ::kernel::time::TickCount,
x: u32,
y: u32,
}
//struct IMEState
//{
// ime_ofs: u8,
// ime_val: u32,
//}
/// Maximum time in kernel ticks between subsequent press/release events for a click/doubleclick
const DOUBLE_CLICK_TIMEOUT: u64 = 500; // 500ms
/// Maximum distance along any axis between press/release before a click is not registered
const MAX_CLICK_MOVE: u32 = 10;
static MAIN_INPUT: InputChannel = InputChannel::new();
pub fn init() {
//MAIN_INPUT.cursor.
}
fn get_channel_by_index(_idx: usize) -> &'static InputChannel {
&MAIN_INPUT
}
impl InputChannel
{
const fn new() -> InputChannel {
InputChannel {
shift_held: ModKeyPair::new(),
ctrl_held: ModKeyPair::new(),
alt_held: ModKeyPair::new(),
//altgr: ModKeyPair::new(),
cursor: MouseCursor::new(),
last_key_pressed: AtomicU8::new(KeyCode::None as u8),
double_click_info: Mutex::new(MouseClickInfo::new()),
}
}
	/// Process a single key press (`release == false`) or release event.
	///
	/// Event ordering delivered to the window subsystem:
	/// - press: `KeyDown`, then (for non-modifiers) `KeyFire` and possibly `Text`
	/// - release: (for a modifier that was the last key pressed) `KeyFire`, then `KeyUp`
	/// Session-switch chords (Ctrl-Alt + Esc/F1..F12) are consumed entirely.
	pub fn handle_key(&self, key: keyboard::KeyCode, release: bool)
	{
		log_trace!("key={:?}, release={}", key, release);
		match (release, key)
		{
		// Maintain key states
		(false, KeyCode::RightShift) => self.shift_held.set_r(),
		(false, KeyCode::LeftShift) => self.shift_held.set_l(),
		(false, KeyCode::RightCtrl) => self.ctrl_held.set_r(),
		(false, KeyCode::LeftCtrl) => self.ctrl_held.set_l(),
		(false, KeyCode::RightAlt) => self.alt_held.set_r(),
		(false, KeyCode::LeftAlt) => self.alt_held.set_l(),
		(true, KeyCode::RightShift) => self.shift_held.clear_r(),
		(true, KeyCode::LeftShift) => self.shift_held.clear_l(),
		(true, KeyCode::RightCtrl) => self.ctrl_held.clear_r(),
		(true, KeyCode::LeftCtrl) => self.ctrl_held.clear_l(),
		(true, KeyCode::RightAlt) => self.alt_held.clear_r(),
		(true, KeyCode::LeftAlt) => self.alt_held.clear_l(),
		// Check for session change commands, don't propagate if they fired
		// - 'try_change_session' checks for the required modifier keys and permissions
		// TODO: Should this be handled by the `windows` module?
		(false, KeyCode::Esc) => if self.try_change_session(0) { return ; },
		(false, KeyCode::F1) => if self.try_change_session(1) { return ; },
		(false, KeyCode::F2) => if self.try_change_session(2) { return ; },
		(false, KeyCode::F3) => if self.try_change_session(3) { return ; },
		(false, KeyCode::F4) => if self.try_change_session(4) { return ; },
		(false, KeyCode::F5) => if self.try_change_session(5) { return ; },
		(false, KeyCode::F6) => if self.try_change_session(6) { return ; },
		(false, KeyCode::F7) => if self.try_change_session(7) { return ; },
		(false, KeyCode::F8) => if self.try_change_session(8) { return ; },
		(false, KeyCode::F9) => if self.try_change_session(9) { return ; },
		(false, KeyCode::F10) => if self.try_change_session(10) { return ; },
		(false, KeyCode::F11) => if self.try_change_session(11) { return ; },
		(false, KeyCode::F12) => if self.try_change_session(12) { return ; },
		_ => {},
		}
		// Snapshot the previous "last key" BEFORE overwriting it, so the
		// modifier fire-on-release check below sees the pre-event state.
		let last_key = self.last_key_pressed.load(Ordering::Relaxed);
		if !release {
			self.last_key_pressed.store(key as u8, Ordering::Relaxed);
			super::windows::handle_input(/*self, */Event::KeyDown(key));
		}
		// Handle fire and text events
		if key.is_modifier()
		{
			// Only fire a modifier on key-up IF they were the last one pressed
			// - This allows "Gui" (windows) to fire on key-up while still being used as a modifier
			if release && last_key == key as u8
			{
				super::windows::handle_input( Event::KeyFire(key) );
			}
		}
		else
		{
			// TODO: Support repetition (of the last non-modifier pressed)
			if !release
			{
				super::windows::handle_input( Event::KeyFire(key) );
				// TODO: Should only generate text if no non-shift modifiers are depressed
				//if self.enable_input_translation {
				let s = self.get_input_string(key);
				if s.len() > 0 {
					// Text events carry a fixed 6-byte UTF-8 buffer
					let mut buf = [0; 6];
					buf[.. s.len()].clone_from_slice( s.as_bytes() );
					super::windows::handle_input( Event::Text(buf) );
				}
				//}
			}
		}
		// Send the key-up to the active window (via the window subsystem)
		if release {
			self.last_key_pressed.store(KeyCode::None as u8, Ordering::Relaxed);
			super::windows::handle_input(/*self, */Event::KeyUp(key));
		}
	}
pub fn handle_mouse_set(&self, norm_x: u16, norm_y: u16)
{
// Mouse movement, update cursor
let (dx,dy) = self.cursor.set_pos(norm_x, norm_y);
let (x,y) = self.cursor.pos();
self.double_click_info.lock().clear();
super::windows::handle_input(/*self, */Event::MouseMove(x, y, dx as i16, dy as i16));
}
pub fn handle_mouse_move(&self, dx: i16, dy: i16)
{
// Mouse movement, update cursor
self.cursor.move_pos(dx as i32, dy as i32);
let (x,y) = self.cursor.pos();
self.double_click_info.lock().clear();
super::windows::handle_input(/*self, */Event::MouseMove(x, y, dx, dy));
}
pub fn handle_mouse_btn(&self, btn: u8, release: bool)
{
let (x,y) = self.cursor.pos();
if release
{
// Released - check the double-click timer
if let Some(ev) = self.double_click_info.lock().check( x,y, btn )
{
super::windows::handle_input(/*self, */ev);
}
super::windows::handle_input(/*self, */Event::MouseUp(x, y, btn));
}
else
{
// Pressed - reset the double-click timer
self.double_click_info.lock().reset(x,y, btn);
super::windows::handle_input(/*self, */Event::MouseDown(x, y, btn));
}
}
fn
|
(&self) -> bool {
self.shift_held.get()
}
	/// Returns true if alphabetic keys should produce upper-case text.
	/// Currently identical to shift state (no caps-lock handling visible here).
	fn upper(&self) -> bool {
		self.shift()
	}
fn get_input_string(&self, keycode: KeyCode) -> &str
{
macro_rules! shift { ($s:ident: $lower:expr, $upper:expr) => { if $s.shift() { $upper } else {$lower} }; }
macro_rules! alpha { ($s:ident: $lower:expr, $upper:expr) => { if $s.upper() { $upper } else {$lower} }; }
match keycode
{
KeyCode::A => alpha!(self: "a", "A"),
KeyCode::B => alpha!(self: "b", "B"),
KeyCode::C => alpha!(self: "c", "C"),
KeyCode::D => alpha!(self: "d", "D"),
KeyCode::E => alpha!(self: "e", "E"),
KeyCode::F => alpha!(self: "f", "F"),
KeyCode::G => alpha!(self: "g", "G"),
KeyCode::H => alpha!(self: "h", "H"),
KeyCode::I => alpha!(self: "i", "I"),
KeyCode::J => alpha!(self: "j", "J"),
KeyCode::K => alpha!(self: "k", "K"),
KeyCode::L => alpha!(self: "l", "L"),
KeyCode::M => alpha!(self: "m", "M"),
KeyCode::N => alpha!(self: "n", "N"),
KeyCode::O => alpha!(self: "o", "O"),
KeyCode::P => alpha!(self: "p", "P"),
KeyCode::Q => alpha!(self: "q", "Q"),
KeyCode::R => alpha!(self: "r", "R"),
KeyCode::S => alpha!(self: "s", "S"),
KeyCode::T => alpha!(self: "t", "T"),
KeyCode::U => alpha!(self: "u", "U"),
KeyCode::V => alpha!(self: "v", "V"),
KeyCode::W => alpha!(self: "w", "W"),
KeyCode::X => alpha!(self: "x", "X"),
KeyCode::Y => alpha!(self: "y", "Y"),
KeyCode::Z => alpha!(self: "z", "Z"),
KeyCode::SquareOpen => shift!(self: "[", "{"),
KeyCode::SquareClose => shift!(self: "[", "{"),
KeyCode::Backslash => shift!(self: "\\","|"),
KeyCode::Semicolon => shift!(self: ";", ":"),
KeyCode::Quote => shift!(self: "'", "\""),
KeyCode::Comma => shift!(self: ",", "<"),
KeyCode::Period => shift!(self: ".", ">"),
KeyCode::Slash => shift!(self: "/", "?"),
KeyCode::Kb1 => shift!(self: "1", "!"),
KeyCode::Kb2 => shift!(self: "2", "@"),
KeyCode::Kb3 => shift!(self: "3", "#"),
KeyCode::Kb4 => shift!(self: "4", "$"),
KeyCode::Kb5 => shift!(self: "5", "%"),
KeyCode::Kb6 => shift!(self: "6", "^"),
KeyCode::Kb7 => shift!(self: "7", "&"),
KeyCode::Kb8 => shift!(self: "8", "*"),
KeyCode::Kb9 => shift!(self: "9", "("),
KeyCode::Kb0 => shift!(self: "0", ")"),
KeyCode::Minus => shift!(self: "-", "_"),
KeyCode::Equals => shift!(self: "=", "+"),
KeyCode::Space => " ",
_ => "",
}
}
fn try_change_session(&self, target: usize) -> bool {
if self.is_master() && self.ctrl_held.get() && self.alt_held.get() {
super::windows::switch_active(target);
true
}
else {
false
}
}
fn is_master(&self) -> bool { true }
}
impl ModKeyPair {
	/// A left/right modifier-key pair packed into one atomic bitmask
	/// (bit 0 = left key, bit 1 = right key).
	const fn new() -> ModKeyPair {
		ModKeyPair(AtomicUsize::new(0))
	}
	/// Mark the left-hand key as held.
	fn set_l(&self) {
		self.0.fetch_or(1, Ordering::Relaxed);
	}
	/// Mark the right-hand key as held.
	fn set_r(&self) {
		self.0.fetch_or(2, Ordering::Relaxed);
	}
	/// Mark the left-hand key as released.
	fn clear_l(&self) {
		self.0.fetch_and(!1, Ordering::Relaxed);
	}
	/// Mark the right-hand key as released.
	fn clear_r(&self) {
		self.0.fetch_and(!2, Ordering::Relaxed);
	}
	/// True if either key of the pair is currently held.
	fn get(&self) -> bool {
		self.0.load(Ordering::Relaxed) != 0
	}
}
impl MouseCursor {
	/// Create a cursor wrapping the video subsystem's cursor handle.
	const fn new() -> MouseCursor {
		MouseCursor {
			graphics_cursor: ::kernel::sync::Mutex::new(::kernel::metadevs::video::CursorHandle::new()),
		}
	}
	/// Apply a signed delta to an unsigned coordinate, saturating at 0 and u32::MAX.
	/// NOTE(review): `-d` overflows for `d == i32::MIN`; deltas that large are
	/// presumably never produced by a mouse driver — confirm.
	fn add_coord(cur: u32, d: i32) -> u32 {
		if d < 0 {
			u32::saturating_sub(cur, -d as u32)
		}
		else {
			u32::saturating_add(cur, d as u32)
		}
	}
	/// Set cursor position to normalised coordinates
	/// (0..=0xFFFF scaled across the display the cursor currently occupies).
	/// Returns the (dx, dy) movement that resulted.
	fn set_pos(&self, norm_x: u16, norm_y: u16) -> (i32, i32) {
		let mut lh = self.graphics_cursor.lock();
		let pos = lh.get_pos();
		// Both Ok and Err carry a usable rect — Err is presumably a fallback
		// area when the cursor is not over any display; TODO confirm.
		let rect = match ::kernel::metadevs::video::get_display_for_pos(pos)
		{
		Ok(v) => v,
		Err(v) => v,
		};
		// Scale the 16-bit normalised coordinate across the display dimensions
		// (64-bit intermediate avoids overflow of w * 0xFFFF).
		let new_pos = ::kernel::metadevs::video::Pos {
			x: rect.x() + ((rect.w() as u64 * norm_x as u64) >> 16) as u32,
			y: rect.y() + ((rect.h() as u64 * norm_y as u64) >> 16) as u32,
		};
		lh.set_pos(new_pos);
		(
			(new_pos.x as i32 - pos.x as i32) as i32,
			(new_pos.y as i32 - pos.y as i32) as i32,
		)
	}
	/// Move the cursor by a relative delta, saturating at coordinate bounds.
	fn move_pos(&self, dx: i32, dy: i32) {
		let mut lh = self.graphics_cursor.lock();
		let mut pos = lh.get_pos();
		pos.x = Self::add_coord(pos.x, dx);
		pos.y = Self::add_coord(pos.y, dy);
		lh.set_pos(pos);
	}
	/// Current cursor position in absolute screen coordinates.
	fn pos(&self) -> (u32,u32) {
		let pos = self.graphics_cursor.lock().get_pos();
		(pos.x, pos.y)
	}
}
impl MouseClickInfo
{
	/// Create an idle tracker; button 0xFF means "nothing being tracked".
	const fn new() -> MouseClickInfo {
		MouseClickInfo {
			button: 0xFF, x: 0, y: 0,
			time: 0,
			count: 0,
		}
	}
	/// Abandon any in-progress click sequence.
	fn clear(&mut self)
	{
		self.button = 0xFF;
	}
	/// Begin tracking a fresh press of `button` at (x, y).
	fn reset(&mut self, x: u32, y: u32, button: u8)
	{
		self.button = button;
		self.count = 0;
		self.x = x;
		self.y = y;
		self.time = ::kernel::time::ticks();
	}
	/// Called on button release. Returns a `MouseClick` event if this release
	/// completes a click: same button, within DOUBLE_CLICK_TIMEOUT, and moved
	/// no more than MAX_CLICK_MOVE on either axis. Any disqualifier clears
	/// the tracker.
	fn check(&mut self, x: u32, y: u32, button: u8) -> Option<Event>
	{
		use kernel::lib::num::abs_diff;
		if self.button != button {
			self.clear();
			return None;
		}
		if (::kernel::time::ticks() - self.time) > DOUBLE_CLICK_TIMEOUT {
			self.clear();
			return None;
		}
		if abs_diff(self.x, x) > MAX_CLICK_MOVE || abs_diff(self.y, y) > MAX_CLICK_MOVE {
			self.clear();
			return None;
		}
		// Qualified: refresh the anchor so a subsequent press can extend the
		// sequence into a double (or triple, ...) click.
		self.time = ::kernel::time::ticks();
		self.x = x;
		self.y = y;
		// Saturate the click count instead of wrapping.
		self.count = self.count.saturating_add(1);
		Some( Event::MouseClick(x, y, button, self.count) )
	}
}
|
shift
|
identifier_name
|
lib.rs
|
extern crate unix_socket;
pub mod protocol;
use protocol::{Command, reply, PortSocket};
use std::collections::HashMap;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::sync::Arc;
use std::sync::{Mutex, MutexGuard, TryLockError};
use std::u8;
// TODO Corking reduces latency, as spid adds overhead for each packet
// Paths to the SPI daemon sockets with incoming data from coprocessor.
const PORT_A_UDS_PATH: &'static str = "/var/run/tessel/port_a";
const PORT_B_UDS_PATH: &'static str = "/var/run/tessel/port_b";
const MCU_MAX_SPEED: u32 = 48e6 as u32;
// TODO: Replace with better name
const MCU_MAX_SCL_RISE_TIME_NS: f64 = 1.5e-8 as f64;
const MCU_MAGIC_DIV_FACTOR_FOR_I2C_BAUD: u8 = 2;
const MCU_MAGIC_SUBTRACT_FACTOR_FOR_I2C_BAUD: u8 = 5;
/// Primary exported Tessel object with access to module ports, LEDs, and a button.
/// # Example
/// ```
/// use tessel::Tessel;
///
/// # #[allow(dead_code)]
/// # fn example() {
/// let t = Tessel::new();
/// // Tessel 2 has four LEDs available.
/// assert_eq!(t.led.len(), 4);
/// // Tessel 2 has two ports labelled a and b
/// let a = t.port.a;
/// let b = t.port.b;
/// # }
/// ```
pub struct Tessel {
// A group of module ports.
pub port: PortGroup,
// An array of LED structs.
pub led: Vec<LED>,
}
impl Tessel {
// new() returns a Tessel struct conforming to the Tessel 2's functionality.
pub fn new() -> Tessel {
// Create a port group with two ports, one on each domain socket path.
let ports = PortGroup {
a: Port::new(PORT_A_UDS_PATH),
b: Port::new(PORT_B_UDS_PATH),
};
// Create models for the four LEDs.
let red_led = LED::new("red", "error");
let amber_led = LED::new("amber", "wlan");
let green_led = LED::new("green", "user1");
let blue_led = LED::new("blue", "user2");
// Return the Tessel with these fields.
Tessel {
port: ports,
led: vec![red_led, amber_led, green_led, blue_led],
}
}
}
/// A PortGroup is a simple way to access each port through its letter identifier.
#[allow(dead_code)]
pub struct PortGroup {
pub a: Port,
pub b: Port,
}
/// A Port is a model of the Tessel hardware ports.
/// # Example
/// ```
/// use tessel::Port;
/// ```
pub struct Port {
    // Shared handle to this port's domain socket (not a path — the open socket).
    socket: Arc<Mutex<PortSocket>>,
    // One mutex per pin index; holding a pin's guard claims that pin.
    pins: HashMap<usize, Mutex<()>>,
}
/// An exclusively-claimed GPIO pin on a port. The held guard prevents a
/// second `Pin` for the same index from existing concurrently.
pub struct Pin<'a> {
    index: usize,
    _guard: MutexGuard<'a, ()>,
    socket: Arc<Mutex<PortSocket>>,
}
impl<'a> Pin<'a> {
pub fn output(&mut self, value: bool) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
if value
|
else {
sock.write_command(Command::GpioLow(self.index as u8))
}
}
}
impl Port {
pub fn new(path: &str) -> Port {
let mut pins = HashMap::new();
for i in 0..8 {
pins.insert(i, Mutex::new(()));
}
// Create and return the port struct
Port {
socket: Arc::new(Mutex::new(PortSocket::new(path))),
pins: pins,
}
}
pub fn pin(&self, index: usize) -> Result<Pin, TryLockError<MutexGuard<()>>> {
Ok(Pin {
index: index,
_guard: try!(self.pins.get(&index).expect("TODO dont panic on pin fetch").lock()),
socket: self.socket.clone(),
})
}
pub fn i2c(&self, frequency: u32) -> Result<I2C, TryLockError<MutexGuard<()>>> {
let scl = try!(self.pin(0));
let sda = try!(self.pin(1));
Ok(I2C::new(self.socket.clone(), scl, sda, frequency))
}
}
/// An I2C bus claimed on a port. Holds pins 0 (SCL) and 1 (SDA) for its lifetime.
pub struct I2C<'p> {
    socket: Arc<Mutex<PortSocket>>,
    // Held only to keep the pins claimed while the bus exists.
    _scl: Pin<'p>,
    _sda: Pin<'p>,
    pub frequency: u32,
}
impl<'p> I2C<'p> {
    // TODO: make frequency optional
    /// Build an I2C bus over `socket` (pins already claimed by the caller)
    /// and immediately enable it at `frequency` Hz.
    fn new<'a>(socket: Arc<Mutex<PortSocket>>, scl: Pin<'a>, sda: Pin<'a>, frequency: u32) -> I2C<'a> {
        let baud: u8 = I2C::compute_baud(frequency);
        let mut i2c = I2C {
            socket: socket,
            _scl: scl,
            _sda: sda,
            frequency: frequency,
        };
        i2c.enable(baud);
        i2c
    }
    /// Computes the baudrate as used on the Atmel SAMD21 I2C register
    /// to set the frequency of the I2C Clock.
    fn compute_baud(frequency: u32) -> u8 {
        let mut intermediate: f64 = MCU_MAX_SPEED as f64 / frequency as f64;
        intermediate = intermediate - MCU_MAX_SPEED as f64 * MCU_MAX_SCL_RISE_TIME_NS;
        // TODO: Do not hardcode these numbers
        intermediate = intermediate / MCU_MAGIC_DIV_FACTOR_FOR_I2C_BAUD as f64 -
                       MCU_MAGIC_SUBTRACT_FACTOR_FOR_I2C_BAUD as f64;
        // Clamp to the register's upper bound (255).
        let low = intermediate.min(u8::max_value() as f64);
        // If we have a potentially negative register value
        // (comparison is done via i64 because a direct float comparison did not work)
        if (low as i64) < u8::min_value() as i64 {
            // Use 0 instead
            return u8::min_value();
        } else {
            // Return the new register value
            return low as u8;
        }
    }
    /// Send the EnableI2C command carrying the computed baud register value.
    fn enable(&mut self, baud: u8) {
        let mut sock = self.socket.lock().unwrap();
        sock.write_command(Command::EnableI2C{ baud: baud }).unwrap();
    }
    /// Issue a START for a write (7-bit address shifted, R/W bit clear),
    /// then the payload. Caller must already hold the socket lock.
    fn tx(&self, sock: &mut MutexGuard<PortSocket>, address: u8, write_buf: &[u8]) {
        sock.write_command(Command::Start(address<<1)).unwrap();
        // Write the command and data
        sock.write_command(Command::Tx(write_buf)).unwrap();
    }
    /// Issue a START for a read (R/W bit set), then request `read_buf.len()` bytes.
    fn rx(&self, sock: &mut MutexGuard<PortSocket>, address: u8, read_buf: &mut [u8]) {
        sock.write_command(Command::Start(address << 1 | 1)).unwrap();
        // Write the command and transfer length
        sock.write_command(Command::Rx(read_buf.len() as u8)).unwrap();
    }
    fn stop(&self, sock: &mut MutexGuard<PortSocket>) {
        // Tell I2C to send STOP condition
        sock.write_command(Command::Stop).unwrap();
    }
    /// Write-only transaction: START, payload, STOP.
    pub fn send(&mut self, address: u8, write_buf: &[u8]) {
        let mut sock = self.socket.lock().unwrap();
        self.tx(&mut sock, address, write_buf);
        self.stop(&mut sock);
    }
    /// Read-only transaction; blocks until the daemon returns the data.
    pub fn read(&mut self, address: u8, read_buf: &mut [u8]) -> io::Result<()> {
        let mut sock = self.socket.lock().unwrap();
        self.rx(&mut sock, address, read_buf);
        self.stop(&mut sock);
        // TODO: this is not how async reads should be handled.
        // Read in first byte, which must be the DATA reply marker.
        let mut read_byte = [0];
        try!(sock.read_exact(&mut read_byte));
        assert_eq!(read_byte[0], reply::DATA.0);
        // Read in data from the socket
        return sock.read_exact(read_buf);
    }
    /// Combined write-then-read transaction (repeated START between phases).
    pub fn transfer(&mut self, address: u8, write_buf: &[u8], read_buf: &mut [u8]) -> io::Result<()> {
        let mut sock = self.socket.lock().unwrap();
        self.tx(&mut sock, address, write_buf);
        self.rx(&mut sock, address, read_buf);
        self.stop(&mut sock);
        // TODO: this is not how async reads should be handled.
        // Read in first byte, which must be the DATA reply marker.
        let mut read_byte = [0];
        try!(sock.read_exact(&mut read_byte));
        assert_eq!(read_byte[0], reply::DATA.0);
        // Read in data from the socket
        return sock.read_exact(read_buf);
    }
}
// TODO: Figure out how to override the path secretly so the example
// can actually be run.
/// A LED models an LED on the Tessel board.
/// # Example
/// ```rust,no_run
/// use tessel::LED;
///
/// let mut led = LED::new("red", "error");
/// // LEDs are off by default.
/// assert_eq!(false, led.read());
/// led.on().unwrap();
/// assert_eq!(true, led.read());
/// ```
pub struct LED {
    // The file object we write to in order to change state.
    file: File,
    // The current value of the LED, defaults to false.
    value: bool,
}
impl LED {
    /// Open the sysfs brightness file for the LED identified by `color`/`kind`.
    pub fn new(color: &'static str, kind: &'static str) -> LED {
        let path = format!("/sys/devices/leds/leds/tessel:{}:{}/brightness",
                           color,
                           kind);
        // Open the file for write operations.
        LED::new_with_file(File::create(path).unwrap())
    }
    /// Build an LED around an already-open file handle, forcing it off so the
    /// cached value and the hardware agree.
    fn new_with_file(file: File) -> LED {
        let mut led = LED {
            value: false,
            file: file,
        };
        led.off().unwrap();
        led
    }
    /// Turn the LED on (same as `high`).
    pub fn on(&mut self) -> Result<(), io::Error> {
        self.high()
    }
    /// Turn the LED off (same as `low`).
    pub fn off(&mut self) -> Result<(), io::Error> {
        self.low()
    }
    /// Drive the LED high (on).
    pub fn high(&mut self) -> Result<(), io::Error> {
        self.write(true)
    }
    /// Drive the LED low (off).
    pub fn low(&mut self) -> Result<(), io::Error> {
        self.write(false)
    }
    /// Flip the LED to the opposite of its cached state.
    pub fn toggle(&mut self) -> Result<(), io::Error> {
        let flipped = !self.value;
        self.write(flipped)
    }
    /// Last value written (cached; not read back from hardware).
    pub fn read(&self) -> bool {
        self.value
    }
    /// Record `new_value` and append its '1'/'0' representation to the file.
    fn write(&mut self, new_value: bool) -> Result<(), io::Error> {
        self.value = new_value;
        let byte = if new_value { b'1' } else { b'0' };
        self.file.write_all(&[byte])
    }
}
#[cfg(test)]
mod tests {
    extern crate tempfile;
    use super::*;
    use std::io::{Read, Seek, SeekFrom};
    /// The LED model must mirror every state change into its backing file.
    #[test]
    fn led_writes_to_file() {
        let mut tmpfile = tempfile::tempfile().unwrap();
        // The tmpfile handle can be reused as long as LED gets its own
        // clone of the handle, and we are diligent about seeking.
        // This avoids needing to figure out where the tmpfile is in order
        // to open more handles.
        let mut led = LED::new_with_file(tmpfile.try_clone().unwrap());
        let mut buf = String::new();
        tmpfile.seek(SeekFrom::Start(0)).unwrap();
        tmpfile.read_to_string(&mut buf).unwrap();
        // Construction turns the LED off, writing a single '0'.
        assert_eq!("0", buf);
        led.on().unwrap();
        tmpfile.seek(SeekFrom::Start(0)).unwrap();
        // NOTE: read_to_string APPENDS to buf, so the existing "0" plus the
        // file contents "01" yields "001" below.
        tmpfile.read_to_string(&mut buf).unwrap();
        // b'1' is written as 001 into the file.
        assert_eq!("001", buf);
    }
}
|
{
sock.write_command(Command::GpioHigh(self.index as u8))
}
|
conditional_block
|
lib.rs
|
extern crate unix_socket;
pub mod protocol;
use protocol::{Command, reply, PortSocket};
use std::collections::HashMap;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::sync::Arc;
use std::sync::{Mutex, MutexGuard, TryLockError};
use std::u8;
// TODO Corking reduces latency, as spid adds overhead for each packet
// Paths to the SPI daemon sockets with incoming data from coprocessor.
const PORT_A_UDS_PATH: &'static str = "/var/run/tessel/port_a";
const PORT_B_UDS_PATH: &'static str = "/var/run/tessel/port_b";
const MCU_MAX_SPEED: u32 = 48e6 as u32;
// TODO: Replace with better name
const MCU_MAX_SCL_RISE_TIME_NS: f64 = 1.5e-8 as f64;
const MCU_MAGIC_DIV_FACTOR_FOR_I2C_BAUD: u8 = 2;
const MCU_MAGIC_SUBTRACT_FACTOR_FOR_I2C_BAUD: u8 = 5;
/// Primary exported Tessel object with access to module ports, LEDs, and a button.
/// # Example
/// ```
/// use tessel::Tessel;
///
/// # #[allow(dead_code)]
/// # fn example() {
/// let t = Tessel::new();
/// // Tessel 2 has four LEDs available.
/// assert_eq!(t.led.len(), 4);
/// // Tessel 2 has two ports labelled a and b
/// let a = t.port.a;
/// let b = t.port.b;
/// # }
/// ```
pub struct Tessel {
// A group of module ports.
pub port: PortGroup,
// An array of LED structs.
pub led: Vec<LED>,
}
impl Tessel {
// new() returns a Tessel struct conforming to the Tessel 2's functionality.
pub fn new() -> Tessel {
// Create a port group with two ports, one on each domain socket path.
let ports = PortGroup {
a: Port::new(PORT_A_UDS_PATH),
b: Port::new(PORT_B_UDS_PATH),
};
// Create models for the four LEDs.
let red_led = LED::new("red", "error");
let amber_led = LED::new("amber", "wlan");
let green_led = LED::new("green", "user1");
let blue_led = LED::new("blue", "user2");
// Return the Tessel with these fields.
Tessel {
port: ports,
led: vec![red_led, amber_led, green_led, blue_led],
}
}
}
/// A PortGroup is a simple way to access each port through its letter identifier.
#[allow(dead_code)]
pub struct PortGroup {
pub a: Port,
pub b: Port,
}
/// A Port is a model of the Tessel hardware ports.
/// # Example
/// ```
/// use tessel::Port;
/// ```
pub struct Port {
// Path of the domain socket.
socket: Arc<Mutex<PortSocket>>,
pins: HashMap<usize, Mutex<()>>,
}
pub struct Pin<'a> {
index: usize,
_guard: MutexGuard<'a, ()>,
socket: Arc<Mutex<PortSocket>>,
}
impl<'a> Pin<'a> {
pub fn output(&mut self, value: bool) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
if value {
sock.write_command(Command::GpioHigh(self.index as u8))
} else {
sock.write_command(Command::GpioLow(self.index as u8))
}
}
}
impl Port {
pub fn new(path: &str) -> Port {
let mut pins = HashMap::new();
for i in 0..8 {
pins.insert(i, Mutex::new(()));
}
// Create and return the port struct
Port {
socket: Arc::new(Mutex::new(PortSocket::new(path))),
pins: pins,
}
}
pub fn pin(&self, index: usize) -> Result<Pin, TryLockError<MutexGuard<()>>> {
Ok(Pin {
index: index,
_guard: try!(self.pins.get(&index).expect("TODO dont panic on pin fetch").lock()),
socket: self.socket.clone(),
})
}
pub fn i2c(&self, frequency: u32) -> Result<I2C, TryLockError<MutexGuard<()>>> {
let scl = try!(self.pin(0));
let sda = try!(self.pin(1));
Ok(I2C::new(self.socket.clone(), scl, sda, frequency))
}
}
pub struct I2C<'p> {
socket: Arc<Mutex<PortSocket>>,
_scl: Pin<'p>,
_sda: Pin<'p>,
pub frequency: u32,
}
impl<'p> I2C<'p> {
// TODO: make frequency optional
fn new<'a>(socket: Arc<Mutex<PortSocket>>, scl: Pin<'a>, sda: Pin<'a>, frequency: u32) -> I2C<'a> {
let baud: u8 = I2C::compute_baud(frequency);
let mut i2c = I2C {
socket: socket,
_scl: scl,
_sda: sda,
frequency: frequency,
};
i2c.enable(baud);
i2c
}
/// Computes the baudrate as used on the Atmel SAMD21 I2C register
/// to set the frequency of the I2C Clock.
fn compute_baud(frequency: u32) -> u8 {
let mut intermediate: f64 = MCU_MAX_SPEED as f64 / frequency as f64;
intermediate = intermediate - MCU_MAX_SPEED as f64 * MCU_MAX_SCL_RISE_TIME_NS;
// TODO: Do not hardcode these numbers
intermediate = intermediate / MCU_MAGIC_DIV_FACTOR_FOR_I2C_BAUD as f64 -
MCU_MAGIC_SUBTRACT_FACTOR_FOR_I2C_BAUD as f64;
// Return either the intermediate value or 255
let low = intermediate.min(u8::max_value() as f64);
// If we have a potentially negative register value
// Casting as i64 because.float does not seem to work
if (low as i64) < u8::min_value() as i64 {
// Use 0 instead
return u8::min_value();
} else {
// Return the new register value
return low as u8;
}
}
fn enable(&mut self, baud: u8)
|
fn tx(&self, sock: &mut MutexGuard<PortSocket>, address: u8, write_buf: &[u8]) {
sock.write_command(Command::Start(address<<1)).unwrap();
// Write the command and data
sock.write_command(Command::Tx(write_buf)).unwrap();
}
fn rx(&self, sock: &mut MutexGuard<PortSocket>, address: u8, read_buf: &mut [u8]) {
sock.write_command(Command::Start(address << 1 | 1)).unwrap();
// Write the command and transfer length
sock.write_command(Command::Rx(read_buf.len() as u8)).unwrap();
}
fn stop(&self, sock: &mut MutexGuard<PortSocket>) {
// Tell I2C to send STOP condition
sock.write_command(Command::Stop).unwrap();
}
pub fn send(&mut self, address: u8, write_buf: &[u8]) {
let mut sock = self.socket.lock().unwrap();
self.tx(&mut sock, address, write_buf);
self.stop(&mut sock);
}
pub fn read(&mut self, address: u8, read_buf: &mut [u8]) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
self.rx(&mut sock, address, read_buf);
self.stop(&mut sock);
// TODO: this is not how async reads should be handled.
// Read in first byte.
let mut read_byte = [0];
try!(sock.read_exact(&mut read_byte));
assert_eq!(read_byte[0], reply::DATA.0);
// Read in data from the socket
return sock.read_exact(read_buf);
}
pub fn transfer(&mut self, address: u8, write_buf: &[u8], read_buf: &mut [u8]) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
self.tx(&mut sock, address, write_buf);
self.rx(&mut sock, address, read_buf);
self.stop(&mut sock);
// TODO: this is not how async reads should be handled.
// Read in first byte.
let mut read_byte = [0];
try!(sock.read_exact(&mut read_byte));
assert_eq!(read_byte[0], reply::DATA.0);
// Read in data from the socket
return sock.read_exact(read_buf);
}
}
// TODO: Figure out how to override the path secretly so the example
// can actually be run.
/// A LED models an LED on the Tessel board.
/// # Example
/// ```rust,no_run
/// use tessel::LED;
///
/// let mut led = LED::new("red", "error");
/// // LEDs are off by default.
/// assert_eq!(false, led.read());
/// led.on().unwrap();
/// assert_eq!(true, led.read());
pub struct LED {
// The file object we write to in order to change state.
file: File,
// The current value of the LED, defaults to false.
value: bool,
}
impl LED {
pub fn new(color: &'static str, kind: &'static str) -> LED {
let path = format!("/sys/devices/leds/leds/tessel:{}:{}/brightness",
color,
kind);
// Open the file for write operations.
LED::new_with_file(File::create(path).unwrap())
}
fn new_with_file(file: File) -> LED {
let mut led = LED {
value: false,
file: file,
};
// Turn the LED off by default.
led.off().unwrap();
led
}
// Turn the LED on (same as `high`).
pub fn on(&mut self) -> Result<(), io::Error> {
self.high()
}
// Turn the LED off (same as `low`).
pub fn off(&mut self) -> Result<(), io::Error> {
self.low()
}
// Turn the LED on.
pub fn high(&mut self) -> Result<(), io::Error> {
self.write(true)
}
// Turn the LED off.
pub fn low(&mut self) -> Result<(), io::Error> {
self.write(false)
}
// Sets the LED to the opposite of its current state.
pub fn toggle(&mut self) -> Result<(), io::Error> {
let new_value =!self.value;
self.write(new_value)
}
// Returns the current state of the LED.
pub fn read(&self) -> bool {
self.value
}
// Helper function to write new state to LED filepath.
fn write(&mut self, new_value: bool) -> Result<(), io::Error> {
// Save the new value to the model.
self.value = new_value;
// Return the binary representation of that value type.
let string_value = match new_value {
true => b'1',
false => b'0',
};
// Write that data to the file and return the result.
self.file.write_all(&[string_value])
}
}
#[cfg(test)]
mod tests {
extern crate tempfile;
use super::*;
use std::io::{Read, Seek, SeekFrom};
#[test]
fn led_writes_to_file() {
let mut tmpfile = tempfile::tempfile().unwrap();
// The tmpfile handle can be reused as long as LED gets its own
// clone of the handle, and we are diligent about seeking.
// This avoids needing to figure out where the tmpfile is in order
// to open more handles.
let mut led = LED::new_with_file(tmpfile.try_clone().unwrap());
let mut buf = String::new();
tmpfile.seek(SeekFrom::Start(0)).unwrap();
tmpfile.read_to_string(&mut buf).unwrap();
assert_eq!("0", buf);
led.on().unwrap();
tmpfile.seek(SeekFrom::Start(0)).unwrap();
tmpfile.read_to_string(&mut buf).unwrap();
// b'1' is written as 001 into the file.
assert_eq!("001", buf);
}
}
|
{
let mut sock = self.socket.lock().unwrap();
sock.write_command(Command::EnableI2C{ baud: baud }).unwrap();
}
|
identifier_body
|
lib.rs
|
extern crate unix_socket;
pub mod protocol;
use protocol::{Command, reply, PortSocket};
use std::collections::HashMap;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::sync::Arc;
use std::sync::{Mutex, MutexGuard, TryLockError};
use std::u8;
// TODO Corking reduces latency, as spid adds overhead for each packet
// Paths to the SPI daemon sockets with incoming data from coprocessor.
const PORT_A_UDS_PATH: &'static str = "/var/run/tessel/port_a";
const PORT_B_UDS_PATH: &'static str = "/var/run/tessel/port_b";
const MCU_MAX_SPEED: u32 = 48e6 as u32;
// TODO: Replace with better name
const MCU_MAX_SCL_RISE_TIME_NS: f64 = 1.5e-8 as f64;
const MCU_MAGIC_DIV_FACTOR_FOR_I2C_BAUD: u8 = 2;
const MCU_MAGIC_SUBTRACT_FACTOR_FOR_I2C_BAUD: u8 = 5;
/// Primary exported Tessel object with access to module ports, LEDs, and a button.
/// # Example
/// ```
/// use tessel::Tessel;
///
/// # #[allow(dead_code)]
/// # fn example() {
/// let t = Tessel::new();
/// // Tessel 2 has four LEDs available.
/// assert_eq!(t.led.len(), 4);
/// // Tessel 2 has two ports labelled a and b
/// let a = t.port.a;
/// let b = t.port.b;
/// # }
/// ```
pub struct Tessel {
// A group of module ports.
pub port: PortGroup,
// An array of LED structs.
pub led: Vec<LED>,
}
impl Tessel {
// new() returns a Tessel struct conforming to the Tessel 2's functionality.
pub fn new() -> Tessel {
// Create a port group with two ports, one on each domain socket path.
let ports = PortGroup {
a: Port::new(PORT_A_UDS_PATH),
b: Port::new(PORT_B_UDS_PATH),
};
// Create models for the four LEDs.
let red_led = LED::new("red", "error");
let amber_led = LED::new("amber", "wlan");
let green_led = LED::new("green", "user1");
let blue_led = LED::new("blue", "user2");
// Return the Tessel with these fields.
Tessel {
port: ports,
led: vec![red_led, amber_led, green_led, blue_led],
}
}
}
/// A PortGroup is a simple way to access each port through its letter identifier.
#[allow(dead_code)]
pub struct PortGroup {
pub a: Port,
pub b: Port,
}
/// A Port is a model of the Tessel hardware ports.
/// # Example
/// ```
/// use tessel::Port;
/// ```
pub struct Port {
// Path of the domain socket.
socket: Arc<Mutex<PortSocket>>,
pins: HashMap<usize, Mutex<()>>,
}
pub struct Pin<'a> {
index: usize,
_guard: MutexGuard<'a, ()>,
socket: Arc<Mutex<PortSocket>>,
}
impl<'a> Pin<'a> {
pub fn output(&mut self, value: bool) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
if value {
sock.write_command(Command::GpioHigh(self.index as u8))
} else {
sock.write_command(Command::GpioLow(self.index as u8))
}
}
}
impl Port {
pub fn new(path: &str) -> Port {
let mut pins = HashMap::new();
for i in 0..8 {
pins.insert(i, Mutex::new(()));
}
// Create and return the port struct
Port {
socket: Arc::new(Mutex::new(PortSocket::new(path))),
pins: pins,
}
}
pub fn pin(&self, index: usize) -> Result<Pin, TryLockError<MutexGuard<()>>> {
Ok(Pin {
index: index,
_guard: try!(self.pins.get(&index).expect("TODO dont panic on pin fetch").lock()),
socket: self.socket.clone(),
})
}
pub fn i2c(&self, frequency: u32) -> Result<I2C, TryLockError<MutexGuard<()>>> {
let scl = try!(self.pin(0));
let sda = try!(self.pin(1));
Ok(I2C::new(self.socket.clone(), scl, sda, frequency))
}
}
pub struct I2C<'p> {
socket: Arc<Mutex<PortSocket>>,
_scl: Pin<'p>,
_sda: Pin<'p>,
pub frequency: u32,
}
impl<'p> I2C<'p> {
// TODO: make frequency optional
fn new<'a>(socket: Arc<Mutex<PortSocket>>, scl: Pin<'a>, sda: Pin<'a>, frequency: u32) -> I2C<'a> {
let baud: u8 = I2C::compute_baud(frequency);
let mut i2c = I2C {
socket: socket,
_scl: scl,
_sda: sda,
frequency: frequency,
};
i2c.enable(baud);
i2c
}
/// Computes the baudrate as used on the Atmel SAMD21 I2C register
/// to set the frequency of the I2C Clock.
fn compute_baud(frequency: u32) -> u8 {
let mut intermediate: f64 = MCU_MAX_SPEED as f64 / frequency as f64;
intermediate = intermediate - MCU_MAX_SPEED as f64 * MCU_MAX_SCL_RISE_TIME_NS;
// TODO: Do not hardcode these numbers
intermediate = intermediate / MCU_MAGIC_DIV_FACTOR_FOR_I2C_BAUD as f64 -
MCU_MAGIC_SUBTRACT_FACTOR_FOR_I2C_BAUD as f64;
// Return either the intermediate value or 255
let low = intermediate.min(u8::max_value() as f64);
// If we have a potentially negative register value
// Casting as i64 because.float does not seem to work
if (low as i64) < u8::min_value() as i64 {
// Use 0 instead
return u8::min_value();
} else {
// Return the new register value
return low as u8;
}
}
fn enable(&mut self, baud: u8) {
let mut sock = self.socket.lock().unwrap();
sock.write_command(Command::EnableI2C{ baud: baud }).unwrap();
}
fn tx(&self, sock: &mut MutexGuard<PortSocket>, address: u8, write_buf: &[u8]) {
sock.write_command(Command::Start(address<<1)).unwrap();
// Write the command and data
sock.write_command(Command::Tx(write_buf)).unwrap();
}
fn rx(&self, sock: &mut MutexGuard<PortSocket>, address: u8, read_buf: &mut [u8]) {
sock.write_command(Command::Start(address << 1 | 1)).unwrap();
// Write the command and transfer length
sock.write_command(Command::Rx(read_buf.len() as u8)).unwrap();
}
fn stop(&self, sock: &mut MutexGuard<PortSocket>) {
// Tell I2C to send STOP condition
sock.write_command(Command::Stop).unwrap();
}
pub fn send(&mut self, address: u8, write_buf: &[u8]) {
let mut sock = self.socket.lock().unwrap();
self.tx(&mut sock, address, write_buf);
self.stop(&mut sock);
}
pub fn read(&mut self, address: u8, read_buf: &mut [u8]) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
self.rx(&mut sock, address, read_buf);
self.stop(&mut sock);
// TODO: this is not how async reads should be handled.
// Read in first byte.
let mut read_byte = [0];
try!(sock.read_exact(&mut read_byte));
assert_eq!(read_byte[0], reply::DATA.0);
// Read in data from the socket
return sock.read_exact(read_buf);
}
pub fn transfer(&mut self, address: u8, write_buf: &[u8], read_buf: &mut [u8]) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
self.tx(&mut sock, address, write_buf);
self.rx(&mut sock, address, read_buf);
self.stop(&mut sock);
// TODO: this is not how async reads should be handled.
// Read in first byte.
let mut read_byte = [0];
try!(sock.read_exact(&mut read_byte));
assert_eq!(read_byte[0], reply::DATA.0);
// Read in data from the socket
return sock.read_exact(read_buf);
}
}
// TODO: Figure out how to override the path secretly so the example
// can actually be run.
/// A LED models an LED on the Tessel board.
/// # Example
/// ```rust,no_run
/// use tessel::LED;
///
/// let mut led = LED::new("red", "error");
/// // LEDs are off by default.
/// assert_eq!(false, led.read());
/// led.on().unwrap();
/// assert_eq!(true, led.read());
pub struct LED {
// The file object we write to in order to change state.
file: File,
// The current value of the LED, defaults to false.
value: bool,
}
impl LED {
pub fn new(color: &'static str, kind: &'static str) -> LED {
let path = format!("/sys/devices/leds/leds/tessel:{}:{}/brightness",
color,
kind);
// Open the file for write operations.
LED::new_with_file(File::create(path).unwrap())
}
fn new_with_file(file: File) -> LED {
let mut led = LED {
value: false,
file: file,
};
// Turn the LED off by default.
led.off().unwrap();
led
}
// Turn the LED on (same as `high`).
pub fn on(&mut self) -> Result<(), io::Error> {
self.high()
}
// Turn the LED off (same as `low`).
pub fn off(&mut self) -> Result<(), io::Error> {
self.low()
}
// Turn the LED on.
pub fn high(&mut self) -> Result<(), io::Error> {
self.write(true)
}
// Turn the LED off.
pub fn low(&mut self) -> Result<(), io::Error> {
self.write(false)
}
// Sets the LED to the opposite of its current state.
pub fn
|
(&mut self) -> Result<(), io::Error> {
let new_value =!self.value;
self.write(new_value)
}
// Returns the current state of the LED.
pub fn read(&self) -> bool {
self.value
}
// Helper function to write new state to LED filepath.
fn write(&mut self, new_value: bool) -> Result<(), io::Error> {
// Save the new value to the model.
self.value = new_value;
// Return the binary representation of that value type.
let string_value = match new_value {
true => b'1',
false => b'0',
};
// Write that data to the file and return the result.
self.file.write_all(&[string_value])
}
}
#[cfg(test)]
mod tests {
extern crate tempfile;
use super::*;
use std::io::{Read, Seek, SeekFrom};
#[test]
fn led_writes_to_file() {
let mut tmpfile = tempfile::tempfile().unwrap();
// The tmpfile handle can be reused as long as LED gets its own
// clone of the handle, and we are diligent about seeking.
// This avoids needing to figure out where the tmpfile is in order
// to open more handles.
let mut led = LED::new_with_file(tmpfile.try_clone().unwrap());
let mut buf = String::new();
tmpfile.seek(SeekFrom::Start(0)).unwrap();
tmpfile.read_to_string(&mut buf).unwrap();
assert_eq!("0", buf);
led.on().unwrap();
tmpfile.seek(SeekFrom::Start(0)).unwrap();
tmpfile.read_to_string(&mut buf).unwrap();
// b'1' is written as 001 into the file.
assert_eq!("001", buf);
}
}
|
toggle
|
identifier_name
|
lib.rs
|
extern crate unix_socket;
pub mod protocol;
use protocol::{Command, reply, PortSocket};
use std::collections::HashMap;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::sync::Arc;
use std::sync::{Mutex, MutexGuard, TryLockError};
use std::u8;
// TODO Corking reduces latency, as spid adds overhead for each packet
// Paths to the SPI daemon sockets with incoming data from coprocessor.
const PORT_A_UDS_PATH: &'static str = "/var/run/tessel/port_a";
const PORT_B_UDS_PATH: &'static str = "/var/run/tessel/port_b";
// Coprocessor (SAMD21) clock: 48 MHz.
const MCU_MAX_SPEED: u32 = 48e6 as u32;
// TODO: Replace with better name
// NOTE(review): despite the `_NS` suffix, the value (1.5e-8) reads as
// seconds (i.e. 15 ns) — confirm the intended unit.
const MCU_MAX_SCL_RISE_TIME_NS: f64 = 1.5e-8 as f64;
// Divisor and offset applied when deriving the SAMD21 I2C BAUD register
// value in `I2C::compute_baud`.
const MCU_MAGIC_DIV_FACTOR_FOR_I2C_BAUD: u8 = 2;
const MCU_MAGIC_SUBTRACT_FACTOR_FOR_I2C_BAUD: u8 = 5;
/// Primary exported Tessel object with access to module ports, LEDs, and a button.
/// # Example
/// ```
/// use tessel::Tessel;
///
/// # #[allow(dead_code)]
/// # fn example() {
/// let t = Tessel::new();
/// // Tessel 2 has four LEDs available.
/// assert_eq!(t.led.len(), 4);
/// // Tessel 2 has two ports labelled a and b
/// let a = t.port.a;
/// let b = t.port.b;
/// # }
/// ```
|
pub port: PortGroup,
// An array of LED structs.
pub led: Vec<LED>,
}
impl Tessel {
// new() returns a Tessel struct conforming to the Tessel 2's functionality.
pub fn new() -> Tessel {
// Create a port group with two ports, one on each domain socket path.
let ports = PortGroup {
a: Port::new(PORT_A_UDS_PATH),
b: Port::new(PORT_B_UDS_PATH),
};
// Create models for the four LEDs.
let red_led = LED::new("red", "error");
let amber_led = LED::new("amber", "wlan");
let green_led = LED::new("green", "user1");
let blue_led = LED::new("blue", "user2");
// Return the Tessel with these fields.
Tessel {
port: ports,
led: vec![red_led, amber_led, green_led, blue_led],
}
}
}
/// A PortGroup is a simple way to access each port through its letter identifier.
#[allow(dead_code)]
pub struct PortGroup {
    /// Module port "A" (domain socket `port_a`).
    pub a: Port,
    /// Module port "B" (domain socket `port_b`).
    pub b: Port,
}
/// A Port is a model of the Tessel hardware ports.
/// # Example
/// ```
/// use tessel::Port;
/// ```
pub struct Port {
    // Shared connection to the port's SPI-daemon domain socket
    // (not merely its path — see `Port::new`).
    socket: Arc<Mutex<PortSocket>>,
    // One mutex per pin (indices 0..8); holding a pin's guard grants a
    // `Pin` handle exclusive use of that pin.
    pins: HashMap<usize, Mutex<()>>,
}
/// An exclusively-held GPIO pin on a `Port`.
pub struct Pin<'a> {
    // Pin number on the port (0..8, see `Port::new`).
    index: usize,
    // Keeps the per-pin mutex locked for the lifetime of this handle,
    // so at most one `Pin` exists per pin at a time.
    _guard: MutexGuard<'a, ()>,
    // Connection used to send GPIO commands for this pin.
    socket: Arc<Mutex<PortSocket>>,
}
impl<'a> Pin<'a> {
    /// Drives the pin high (`true`) or low (`false`) by sending the
    /// matching GPIO command to the port's SPI daemon.
    pub fn output(&mut self, value: bool) -> io::Result<()> {
        // Pick the command first, then hold the socket lock only for
        // the duration of the single write.
        let command = match value {
            true => Command::GpioHigh(self.index as u8),
            false => Command::GpioLow(self.index as u8),
        };
        self.socket.lock().unwrap().write_command(command)
    }
}
impl Port {
    /// Opens a port backed by the SPI-daemon domain socket at `path`.
    pub fn new(path: &str) -> Port {
        // One mutex per pin so each pin can be claimed at most once.
        let mut pins = HashMap::new();
        for index in 0..8 {
            pins.insert(index, Mutex::new(()));
        }
        Port {
            socket: Arc::new(Mutex::new(PortSocket::new(path))),
            pins: pins,
        }
    }
    /// Claims pin `index`, failing if it is already claimed elsewhere.
    ///
    /// # Panics
    /// Panics if `index` is not in `0..8`.
    pub fn pin(&self, index: usize) -> Result<Pin, TryLockError<MutexGuard<()>>> {
        let pin_lock = self.pins.get(&index).expect("TODO dont panic on pin fetch");
        let guard = pin_lock.lock()?;
        Ok(Pin {
            index: index,
            _guard: guard,
            socket: self.socket.clone(),
        })
    }
    /// Claims pins 0 (SCL) and 1 (SDA) and configures the port for I2C
    /// at the requested frequency.
    pub fn i2c(&self, frequency: u32) -> Result<I2C, TryLockError<MutexGuard<()>>> {
        let scl = self.pin(0)?;
        let sda = self.pin(1)?;
        Ok(I2C::new(self.socket.clone(), scl, sda, frequency))
    }
}
/// An I2C bus on a module port, holding SCL/SDA exclusively while alive.
pub struct I2C<'p> {
    // Connection to the port's SPI daemon, shared with the owning `Port`.
    socket: Arc<Mutex<PortSocket>>,
    // SCL pin (port pin 0, see `Port::i2c`); held so nothing else can
    // drive it while the bus is in use.
    _scl: Pin<'p>,
    // SDA pin (port pin 1); held for the same reason.
    _sda: Pin<'p>,
    // Requested clock frequency, as passed to `compute_baud`.
    pub frequency: u32,
}
impl<'p> I2C<'p> {
    // TODO: make frequency optional
    /// Builds an I2C handle over `socket`, taking ownership of the
    /// SCL/SDA pin guards, and immediately enables the bus at the baud
    /// value derived from `frequency`.
    fn new<'a>(socket: Arc<Mutex<PortSocket>>, scl: Pin<'a>, sda: Pin<'a>, frequency: u32) -> I2C<'a> {
        let baud: u8 = I2C::compute_baud(frequency);
        let mut i2c = I2C {
            socket: socket,
            _scl: scl,
            _sda: sda,
            frequency: frequency,
        };
        i2c.enable(baud);
        i2c
    }
    /// Computes the baudrate as used on the Atmel SAMD21 I2C register
    /// to set the frequency of the I2C Clock.
    fn compute_baud(frequency: u32) -> u8 {
        // Ratio of the MCU clock to the requested bus clock.
        let mut intermediate: f64 = MCU_MAX_SPEED as f64 / frequency as f64;
        // Subtract the clock cycles lost to SCL rise time.
        intermediate = intermediate - MCU_MAX_SPEED as f64 * MCU_MAX_SCL_RISE_TIME_NS;
        // TODO: Do not hardcode these numbers
        intermediate = intermediate / MCU_MAGIC_DIV_FACTOR_FOR_I2C_BAUD as f64 -
        MCU_MAGIC_SUBTRACT_FACTOR_FOR_I2C_BAUD as f64;
        // Clamp to the register maximum (255).
        let low = intermediate.min(u8::max_value() as f64);
        // Cast to i64 first so a negative intermediate result is still
        // detectable (a direct f64-to-u8 cast would lose the sign).
        if (low as i64) < u8::min_value() as i64 {
            // Negative results clamp to 0.
            return u8::min_value();
        } else {
            // Otherwise the value fits the register as-is.
            return low as u8;
        }
    }
    /// Tells the coprocessor to enable I2C at the given register baud.
    fn enable(&mut self, baud: u8) {
        let mut sock = self.socket.lock().unwrap();
        sock.write_command(Command::EnableI2C{ baud: baud }).unwrap();
    }
    /// Queues a START in write mode (address shifted left, R/W bit
    /// clear) followed by the payload bytes.
    fn tx(&self, sock: &mut MutexGuard<PortSocket>, address: u8, write_buf: &[u8]) {
        sock.write_command(Command::Start(address<<1)).unwrap();
        // Write the command and data
        sock.write_command(Command::Tx(write_buf)).unwrap();
    }
    /// Queues a START in read mode (R/W bit set) and the number of
    /// bytes to clock in.
    fn rx(&self, sock: &mut MutexGuard<PortSocket>, address: u8, read_buf: &mut [u8]) {
        sock.write_command(Command::Start(address << 1 | 1)).unwrap();
        // Write the command and transfer length
        sock.write_command(Command::Rx(read_buf.len() as u8)).unwrap();
    }
    /// Queues a STOP condition, ending the current transaction.
    fn stop(&self, sock: &mut MutexGuard<PortSocket>) {
        // Tell I2C to send STOP condition
        sock.write_command(Command::Stop).unwrap();
    }
    /// Write-only transaction: START + payload + STOP.
    pub fn send(&mut self, address: u8, write_buf: &[u8]) {
        let mut sock = self.socket.lock().unwrap();
        self.tx(&mut sock, address, write_buf);
        self.stop(&mut sock);
    }
    /// Read-only transaction: queues START/Rx/STOP, then reads the
    /// daemon's reply (one DATA marker byte, then the payload) back
    /// into `read_buf`.
    pub fn read(&mut self, address: u8, read_buf: &mut [u8]) -> io::Result<()> {
        let mut sock = self.socket.lock().unwrap();
        self.rx(&mut sock, address, read_buf);
        self.stop(&mut sock);
        // TODO: this is not how async reads should be handled.
        // Read in first byte.
        let mut read_byte = [0];
        try!(sock.read_exact(&mut read_byte));
        assert_eq!(read_byte[0], reply::DATA.0);
        // Read in data from the socket
        return sock.read_exact(read_buf);
    }
    /// Combined write-then-read transaction (repeated START between the
    /// write and read phases), followed by the same reply handling as
    /// `read`.
    pub fn transfer(&mut self, address: u8, write_buf: &[u8], read_buf: &mut [u8]) -> io::Result<()> {
        let mut sock = self.socket.lock().unwrap();
        self.tx(&mut sock, address, write_buf);
        self.rx(&mut sock, address, read_buf);
        self.stop(&mut sock);
        // TODO: this is not how async reads should be handled.
        // Read in first byte.
        let mut read_byte = [0];
        try!(sock.read_exact(&mut read_byte));
        assert_eq!(read_byte[0], reply::DATA.0);
        // Read in data from the socket
        return sock.read_exact(read_buf);
    }
}
// TODO: Figure out how to override the path secretly so the example
// can actually be run.
/// A LED models an LED on the Tessel board.
/// # Example
/// ```rust,no_run
/// use tessel::LED;
///
/// let mut led = LED::new("red", "error");
/// // LEDs are off by default.
/// assert_eq!(false, led.read());
/// led.on().unwrap();
/// assert_eq!(true, led.read());
/// ```
pub struct LED {
    // Sysfs "brightness" file; writing b'0'/b'1' switches the LED.
    file: File,
    // Cached logical state; starts out `false` (off).
    value: bool,
}
impl LED {
    /// Opens the sysfs brightness file for the LED identified by
    /// `color` and `kind` (e.g. `"red"`, `"error"`).
    ///
    /// # Panics
    /// Panics if the sysfs file cannot be opened, or if the initial
    /// "off" write fails.
    pub fn new(color: &'static str, kind: &'static str) -> LED {
        let path = format!("/sys/devices/leds/leds/tessel:{}:{}/brightness",
                           color,
                           kind);
        // Open the file for write operations.
        LED::new_with_file(File::create(path).unwrap())
    }
    /// Wraps an already-open file handle and forces the LED off, so the
    /// cached `value` and the hardware state start out in agreement.
    fn new_with_file(file: File) -> LED {
        let mut led = LED {
            value: false,
            file: file,
        };
        // Turn the LED off by default.
        led.off().unwrap();
        led
    }
    /// Turn the LED on (same as `high`).
    pub fn on(&mut self) -> Result<(), io::Error> {
        self.high()
    }
    /// Turn the LED off (same as `low`).
    pub fn off(&mut self) -> Result<(), io::Error> {
        self.low()
    }
    /// Turn the LED on.
    pub fn high(&mut self) -> Result<(), io::Error> {
        self.write(true)
    }
    /// Turn the LED off.
    pub fn low(&mut self) -> Result<(), io::Error> {
        self.write(false)
    }
    /// Sets the LED to the opposite of its current state.
    pub fn toggle(&mut self) -> Result<(), io::Error> {
        let new_value = !self.value;
        self.write(new_value)
    }
    /// Returns the current (cached) state of the LED.
    pub fn read(&self) -> bool {
        self.value
    }
    /// Writes the new state to the LED file and records it.
    fn write(&mut self, new_value: bool) -> Result<(), io::Error> {
        // ASCII representation the brightness file expects.
        let string_value = match new_value {
            true => b'1',
            false => b'0',
        };
        // Push the state to the hardware first ...
        self.file.write_all(&[string_value])?;
        // ... and only record it once the write succeeded, so `read()`
        // never reports a state that was not actually applied. (The
        // previous code updated `value` before the write, leaving the
        // cache wrong whenever the write failed.)
        self.value = new_value;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    extern crate tempfile;
    use super::*;
    use std::io::{Read, Seek, SeekFrom};
    // Verifies that LED state changes are persisted to the backing file.
    #[test]
    fn led_writes_to_file() {
        let mut tmpfile = tempfile::tempfile().unwrap();
        // The tmpfile handle can be reused as long as LED gets its own
        // clone of the handle, and we are diligent about seeking.
        // This avoids needing to figure out where the tmpfile is in order
        // to open more handles.
        let mut led = LED::new_with_file(tmpfile.try_clone().unwrap());
        let mut buf = String::new();
        tmpfile.seek(SeekFrom::Start(0)).unwrap();
        tmpfile.read_to_string(&mut buf).unwrap();
        assert_eq!("0", buf);
        led.on().unwrap();
        tmpfile.seek(SeekFrom::Start(0)).unwrap();
        tmpfile.read_to_string(&mut buf).unwrap();
        // `buf` is not cleared between reads: it still holds the "0"
        // from above, and `read_to_string` appends the file's current
        // contents ("01"), giving "001".
        assert_eq!("001", buf);
    }
}
|
pub struct Tessel {
// A group of module ports.
|
random_line_split
|
watch.rs
|
//! Recursively watch paths for changes, in an extensible and
//! cross-platform way.
use crate::NixFile;
use crossbeam_channel as chan;
use notify::event::ModifyKind;
use notify::{EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use slog_scope::{debug, info};
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::time::Duration;
/// A dynamic list of paths to watch for changes, and
/// react to changes when they occur.
pub struct Watch {
    /// Event receiver. Process using `Watch::process`.
    pub rx: chan::Receiver<notify::Result<notify::Event>>,
    // Underlying OS watcher; registrations go through `add_path`.
    notify: RecommendedWatcher,
    // Paths explicitly added to the watch list. Parent directories are
    // registered with the notifier but not recorded here — see
    // `add_path` and `path_match`.
    watches: HashSet<PathBuf>,
}
/// A debug message string that can only be displayed via `Debug`.
#[derive(Clone, Debug, Serialize)]
pub struct DebugMessage(pub String);
// A path rejected from the watch list, together with the reason.
#[derive(Debug, PartialEq, Eq)]
struct FilteredOut<'a> {
    // Human-readable rejection reason (e.g. "starts with /nix/store").
    reason: &'a str,
    // The path that was filtered out.
    path: PathBuf,
}
/// Description of the project change that triggered a build.
#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum Reason {
    /// When a project is presented to Lorri to track, it's built for this reason.
    ProjectAdded(NixFile),
    /// When a ping is received.
    PingReceived,
    /// When there is a filesystem change: the watched paths that were
    /// affected (all "interesting" paths from the event, as collected
    /// by `Watch::process`).
    FilesChanged(Vec<PathBuf>),
    /// When the underlying notifier reports something strange.
    UnknownEvent(DebugMessage),
}
/// We weren’t able to understand a `notify::Event`.
#[derive(Clone, Debug)]
pub enum EventError {
    /// No message was received from the raw event channel
    /// (not constructed in this module — presumably produced by
    /// callers draining `Watch::rx`; TODO confirm).
    RxNoEventReceived,
    /// The changed file event had no file path
    EventHasNoFilePath(notify::Event),
}
impl Watch {
    /// Instantiate a new Watch.
    pub fn try_new() -> Result<Watch, notify::Error> {
        let (tx, rx) = chan::unbounded();
        Ok(Watch {
            // 100ms debounce on raw filesystem events.
            notify: Watcher::new(tx, Duration::from_millis(100))?,
            watches: HashSet::new(),
            rx,
        })
    }
    /// Process `notify::Event`s coming in via `Watch::rx`.
    ///
    /// `None` if there were no relevant changes.
    pub fn process(
        &self,
        event: notify::Result<notify::Event>,
    ) -> Option<Result<Reason, EventError>> {
        match event {
            Err(err) => panic!("notify error: {}", err),
            Ok(event) => {
                self.log_event(&event);
                if event.paths.is_empty() {
                    Some(Err(EventError::EventHasNoFilePath(event)))
                } else {
                    let notify::Event { paths, kind, .. } = event;
                    // Keep only paths we actually watch, and only event
                    // kinds worth reacting to.
                    let interesting_paths: Vec<PathBuf> = paths
                        .into_iter()
                        .filter(|p| self.path_is_interesting(p, &kind))
                        .collect();
                    if !interesting_paths.is_empty() {
                        Some(Ok(Reason::FilesChanged(interesting_paths)))
                    } else {
                        None
                    }
                }
            }
        }
    }
    /// Extend the watch list with an additional list of paths.
    /// Note: Watch maintains a list of already watched paths, and
    /// will not add duplicates.
    pub fn extend(&mut self, paths: Vec<PathBuf>) -> Result<(), notify::Error> {
        for path in paths {
            let recursive_paths = walk_path_topo(path)?;
            for p in recursive_paths {
                let p = p.canonicalize()?;
                match Self::extend_filter(p) {
                    Err(FilteredOut { reason, path }) => {
                        debug!("Skipping watching {}: {}", path.display(), reason)
                    }
                    Ok(p) => {
                        self.add_path(p)?;
                    }
                }
            }
        }
        Ok(())
    }
    /// Rejects paths that must never be watched (the immutable nix
    /// store); passes everything else through.
    fn extend_filter(path: PathBuf) -> Result<PathBuf, FilteredOut<'static>> {
        if path.starts_with(Path::new("/nix/store")) {
            Err(FilteredOut {
                path,
                reason: "starts with /nix/store",
            })
        } else {
            Ok(path)
        }
    }
    /// Logs each event: removals at `info` level, everything else at
    /// `debug`.
    // NOTE(review): the parameter type read `¬ify::Event` in the
    // checked-in text — an HTML `&not;` entity that swallowed the `&n`
    // of `&notify` — which does not compile; restored `&notify::Event`.
    fn log_event(&self, event: &notify::Event) {
        debug!("Watch Event: {:#?}", event);
        match &event.kind {
            notify::event::EventKind::Remove(_) if !event.paths.is_empty() => {
                info!("identified removal: {:?}", &event.paths);
            }
            _ => {
                debug!("watch event"; "event" => ?event);
            }
        }
    }
    /// Registers `path` with the notifier, skipping duplicates, and
    /// also registers its parent directory so that renames over the
    /// file (e.g. vim's atomic-write) are observed.
    fn add_path(&mut self, path: PathBuf) -> Result<(), notify::Error> {
        if !self.watches.contains(&path) {
            debug!("watching path"; "path" => path.to_str());
            self.notify.watch(&path, RecursiveMode::NonRecursive)?;
            self.watches.insert(path.clone());
        }
        if let Some(parent) = path.parent() {
            if !self.watches.contains(parent) {
                debug!("watching parent path"; "parent_path" => parent.to_str());
                self.notify.watch(&parent, RecursiveMode::NonRecursive)?;
                // The parent is intentionally NOT inserted into
                // `self.watches`: `path_match` treats membership there
                // as "interesting", and recording the parent would make
                // every sibling file match.
            }
        }
        Ok(())
    }
    /// True if `path` is covered by the watch list and `kind` is an
    /// event kind we should react to.
    fn path_is_interesting(&self, path: &PathBuf, kind: &EventKind) -> bool {
        path_match(&self.watches, path)
            && match kind {
                // We ignore metadata modification events for the profiles directory
                // tree as it is a symlink forest that is used to keep track of
                // channels and nix will unconditionally update the metadata of each
                // link in this forest. See https://github.com/NixOS/nix/blob/629b9b0049363e091b76b7f60a8357d9f94733cc/src/libstore/local-store.cc#L74-L80
                // for the unconditional update. These metadata modification events are
                // spurious and they can easily cause a rebuild-loop when a shell.nix
                // file does not pin its version of nixpkgs or other channels. When
                // a Nix channel is updated we receive many other types of events, so
                // ignoring these metadata modifications will not impact lorri's
                // ability to correctly watch for channel changes.
                EventKind::Modify(ModifyKind::Metadata(_)) => {
                    if path.starts_with(Path::new("/nix/var/nix/profiles/per-user")) {
                        debug!("ignoring spurious metadata change event within the profiles dir"; "path" => path.to_str());
                        false
                    } else {
                        true
                    }
                }
                _ => true,
            }
    }
}
/// Splits the entries of `dir` into two vectors: (directories, files).
/// `dir` must be a readable directory, or the I/O error is returned.
fn list_dir(dir: &Path) -> Result<(Vec<PathBuf>, Vec<PathBuf>), std::io::Error> {
    let mut dirs = Vec::new();
    let mut files = Vec::new();
    for entry in std::fs::read_dir(dir)? {
        let entry = entry?;
        // Route each entry into the matching bucket.
        let bucket = if entry.file_type()?.is_dir() {
            &mut dirs
        } else {
            &mut files
        };
        bucket.push(entry.path());
    }
    Ok((dirs, files))
}
/// List all children of the given path.
/// Recurses into directories.
///
/// Returns the given path first, then a topologically sorted list of
/// children, if any: every directory appears before its own contents,
/// and a directory's plain files come before its subdirectories.
///
/// All files have to be readable, or the function aborts.
/// TODO: gracefully skip unreadable files.
fn walk_path_topo(path: PathBuf) -> Result<Vec<PathBuf>, std::io::Error> {
    // The root itself is always the first entry.
    let mut res = vec![path.clone()];
    // A plain file has no children to enumerate.
    if !path.is_dir() {
        return Ok(res);
    }
    // The root's plain files come right after the root.
    let (dirs, mut files) = list_dir(&path)?;
    res.append(&mut files);
    // Breadth-first traversal over the remaining directories: emit the
    // directory, then its files, and queue its subdirectories.
    let mut work = std::collections::VecDeque::from(dirs);
    while let Some(dir) = work.pop_front() {
        res.push(dir.clone());
        let (subdirs, mut subfiles) = list_dir(&dir)?;
        res.append(&mut subfiles);
        work.append(&mut std::collections::VecDeque::from(subdirs));
    }
    Ok(res)
}
/// Determine if the event path is covered by our list of watched
/// paths.
///
/// Returns true if:
/// - the event's path directly names a path in our
///   watch list
/// - the event's path names a canonicalized path in our watch list
/// - the event's path's parent directly names a path in our watch
///   list
/// - the event's path's parent names a canonicalized path in our
///   watch list
fn path_match(watched_paths: &HashSet<PathBuf>, event_path: &Path) -> bool {
    let event_parent = event_path.parent();
    // True when `candidate` names the event path itself or its parent.
    let hits = |candidate: &Path| {
        if candidate == event_path {
            debug!(
                "event path directly matches watched path";
                "event_path" => event_path.to_str());
            return true;
        }
        match event_parent {
            Some(parent) if parent == candidate => {
                debug!(
                    "event path parent matches watched path";
                    "event_path" => event_path.to_str(), "parent_path" => parent.to_str());
                true
            }
            _ => false,
        }
    };
    // Try each watched path both as written and canonicalized.
    watched_paths.iter().any(|watched| {
        hits(watched)
            || watched
                .canonicalize()
                .map(|canonicalized| hits(&canonicalized))
                .unwrap_or(false)
    })
}
#[cfg(test)]
mod tests {
    use super::{EventError, Reason, Watch};
    use crate::bash::expect_bash;
    use std::path::PathBuf;
    use std::thread::sleep;
    use std::time::Duration;
    use tempfile::tempdir;
    /// upper bound of watcher (if it’s hit, something is broken)
    fn upper_watcher_timeout() -> Duration {
        // CI machines are very slow sometimes.
        Duration::from_millis(1000)
    }
    /// Collect all notifications
    fn process_all(watch: &Watch) -> Vec<Option<Result<Reason, EventError>>> {
        watch.rx.try_iter().map(|e| watch.process(e)).collect()
    }
    /// Returns (whether `file_name` changed, all reasons observed).
    fn file_changed(watch: &Watch, file_name: &str) -> (bool, Vec<Reason>) {
        let mut reasons = Vec::new();
        let mut changed = false;
        for event in process_all(watch) {
            if let Some(Ok(reason)) = event {
                reasons.push(reason.clone());
                if let Reason::FilesChanged(files) = reason {
                    changed = changed
                        || files
                            .iter()
                            .map(|p| p.file_name())
                            .filter(|f| f.is_some())
                            .map(|f| f.unwrap())
                            .any(|f| f == file_name)
                }
            }
        }
        (changed, reasons)
    }
    // Asserts a change notification for `file_name` was received,
    // printing whatever events DID occur on failure.
    fn assert_file_changed(watch: &Watch, file_name: &str) {
        let (file_changed, events) = file_changed(watch, file_name);
        assert!(
            file_changed,
            "no file change notification for '{}'; these events occurred instead: {:?}",
            file_name, events
        );
    }
    /// Returns true iff there were no changes
    fn no_changes(watch: &Watch) -> bool {
        process_all(watch).iter().filter(|e| e.is_some()).count() == 0
    }
    #[cfg(target_os = "macos")]
    fn macos_eat_late_notifications(watcher: &mut Watch) {
        // Sometimes a brand new watch will send a CREATE notification
        // for a file which was just created, even if the watch was
        // created after the file was made.
        //
        // Our tests want to be very precise about which events are
        // received when, so expect these initial events and swallow
        // them.
        //
        // Note, this is racey in the kernel. Otherwise I'd assert
        // this is empty.
        sleep(upper_watcher_timeout());
        process_all(watcher).is_empty();
    }
    #[cfg(not(target_os = "macos"))]
    fn macos_eat_late_notifications(watcher: &mut Watch) {
        // If we're supposedly dealing with a late notification on
        // macOS, we'd better not receive any messages on other
        // platforms.
        //
        // If we do receive any notifications, our test is broken.
        sleep(upper_watcher_timeout());
        assert!(process_all(watcher).is_empty());
    }
    #[test]
    fn trivial_watch_whole_directory() {
        let mut watcher = Watch::try_new().expect("failed creating Watch");
        let temp = tempdir().unwrap();
        expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
        watcher.extend(vec![temp.path().to_path_buf()]).unwrap();
        expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
        sleep(upper_watcher_timeout());
        assert_file_changed(&watcher, "foo");
        expect_bash(r#"echo 1 > "$1/foo""#, &[temp.path().as_os_str()]);
        sleep(upper_watcher_timeout());
        assert_file_changed(&watcher, "foo");
    }
    #[test]
    fn trivial_watch_specific_file() {
        let mut watcher = Watch::try_new().expect("failed creating Watch");
        let temp = tempdir().unwrap();
        expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
        expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
        watcher.extend(vec![temp.path().join("foo")]).unwrap();
        macos_eat_late_notifications(&mut watcher);
        expect_bash(r#"echo 1 > "$1/foo""#, &[temp.path().as_os_str()]);
        sleep(upper_watcher_timeout());
        assert_file_changed(&watcher, "foo");
    }
    #[test]
    fn rename_over_vim() {
        // Vim renames files in to place for atomic writes
        let mut watcher = Watch::try_new().expect("failed creating Watch");
        let temp = tempdir().unwrap();
        expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
        expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
        watcher.extend(vec![temp.path().join("foo")]).unwrap();
        macos_eat_late_notifications(&mut watcher);
        // bar is not watched, expect error
        expect_bash(r#"echo 1 > "$1/bar""#, &[temp.path().as_os_str()]);
        sleep(upper_watcher_timeout());
        assert!(no_changes(&watcher));
        // Rename bar to foo, expect a notification
        expect_bash(r#"mv "$1/bar" "$1/foo""#, &[temp.path().as_os_str()]);
        sleep(upper_watcher_timeout());
        assert_file_changed(&watcher, "foo");
        // Do it a second time
        expect_bash(r#"echo 1 > "$1/bar""#, &[temp.path().as_os_str()]);
        sleep(upper_watcher_timeout());
        assert!(no_changes(&watcher));
        // Rename bar to foo, expect a notification
        expect_bash(r#"mv "$1/bar" "$1/foo""#, &[temp.path().as_os_str()]);
        sleep(upper_watcher_timeout());
        assert_file_changed(&watcher, "foo");
    }
    #[test]
    fn walk_path_topo_filetree() -> std::io::Result<()> {
        let temp = tempdir().unwrap();
        let files = vec![("a", "b"), ("a", "c"), ("a/d", "e"), ("x/y", "z")];
        for (dir, file) in files {
            std::fs::create_dir_all(temp.path().join(dir))?;
            std::fs::write(temp.path().join(dir).join(file), [])?;
        }
        let res = super::walk_path_topo(temp.path().to_owned())?;
        // check that the list is topologically sorted
        // by making sure *no* later path is a prefix of a previous path.
        let mut inv = res.clone();
        inv.reverse();
        for i in 0..inv.len() {
            for predecessor in inv.iter().skip(i + 1) {
                assert!(
                    !predecessor.starts_with(&inv[i]),
                    "{:?} is a prefix of {:?}, even though it comes later in list, thus topological order is not given!\nFull list: {:#?}",
                    inv[i], predecessor, res
                )
            }
        }
        // make sure the resulting list contains the same
        // paths as the original list.
        let mut res2 = res.clone();
        res2.sort();
        let mut all_paths = vec![
            // our given path first
            "", "a", // direct files come before nested directories
            "a/b", "a/c", "x", "a/d", "a/d/e", "x/y", "x/y/z",
        ]
        .iter()
        .map(|p| temp.path().join(p).to_owned())
        .collect::<Vec<_>>();
        all_paths.sort();
        assert_eq!(res2, all_paths);
        Ok(())
    }
    #[test]
    fn extend_filter() {
        let nix = PathBuf::from("/nix/store/njlavpa90laywf22b1myif5101qhln8r-hello-2.10");
        match super::Watch::extend_filter(nix.clone()) {
            Ok(path) => assert!(false, "{:?} should be filtered!", path),
            Err(super::FilteredOut { path, reason }) => {
                drop(reason);
                assert_eq!(path, nix)
            }
        }
        let other = PathBuf::from("/home/foo/project/foobar.nix");
        assert_eq!(super::Watch::extend_filter(other.clone()), Ok(other));
    }
}
|
identifier_body
|
|
watch.rs
|
//! Recursively watch paths for changes, in an extensible and
//! cross-platform way.
use crate::NixFile;
use crossbeam_channel as chan;
use notify::event::ModifyKind;
use notify::{EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use slog_scope::{debug, info};
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::time::Duration;
/// A dynamic list of paths to watch for changes, and
/// react to changes when they occur.
pub struct Watch {
/// Event receiver. Process using `Watch::process`.
pub rx: chan::Receiver<notify::Result<notify::Event>>,
notify: RecommendedWatcher,
watches: HashSet<PathBuf>,
}
/// A debug message string that can only be displayed via `Debug`.
#[derive(Clone, Debug, Serialize)]
pub struct DebugMessage(pub String);
#[derive(Debug, PartialEq, Eq)]
struct FilteredOut<'a> {
reason: &'a str,
path: PathBuf,
}
/// Description of the project change that triggered a build.
#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum Reason {
/// When a project is presented to Lorri to track, it's built for this reason.
ProjectAdded(NixFile),
/// When a ping is received.
PingReceived,
/// When there is a filesystem change, the first changed file is recorded,
/// along with a count of other filesystem events.
FilesChanged(Vec<PathBuf>),
/// When the underlying notifier reports something strange.
UnknownEvent(DebugMessage),
}
/// We weren’t able to understand a `notify::Event`.
#[derive(Clone, Debug)]
pub enum EventError {
/// No message was received from the raw event channel
RxNoEventReceived,
/// The changed file event had no file path
EventHasNoFilePath(notify::Event),
}
impl Watch {
/// Instantiate a new Watch.
pub fn try_new() -> Result<Watch, notify::Error> {
let (tx, rx) = chan::unbounded();
Ok(Watch {
notify: Watcher::new(tx, Duration::from_millis(100))?,
watches: HashSet::new(),
rx,
})
}
/// Process `notify::Event`s coming in via `Watch::rx`.
///
/// `None` if there were no relevant changes.
pub fn process(
&self,
event: notify::Result<notify::Event>,
) -> Option<Result<Reason, EventError>> {
match event {
Err(err) => panic!("notify error: {}", err),
Ok(event) => {
self.log_event(&event);
if event.paths.is_empty() {
Some(Err(EventError::EventHasNoFilePath(event)))
} else {
let notify::Event { paths, kind,.. } = event;
let interesting_paths: Vec<PathBuf> = paths
.into_iter()
.filter(|p| self.path_is_interesting(p, &kind))
.collect();
if!interesting_paths.is_empty() {
Some(Ok(Reason::FilesChanged(interesting_paths)))
} else {
None
}
}
}
}
}
/// Extend the watch list with an additional list of paths.
/// Note: Watch maintains a list of already watched paths, and
/// will not add duplicates.
pub fn extend(&mut self, paths: Vec<PathBuf>) -> Result<(), notify::Error> {
for path in paths {
let recursive_paths = walk_path_topo(path)?;
for p in recursive_paths {
let p = p.canonicalize()?;
match Self::extend_filter(p) {
Err(FilteredOut { reason, path }) => {
debug!("Skipping watching {}: {}", path.display(), reason)
}
Ok(p) => {
self.add_path(p)?;
}
}
}
}
Ok(())
}
fn extend_filter(path: PathBuf) -> Result<PathBuf, FilteredOut<'static>> {
if path.starts_with(Path::new("/nix/store")) {
Err(FilteredOut {
path,
reason: "starts with /nix/store",
})
} else {
Ok(path)
}
}
fn log_event(&self, event: ¬ify::Event) {
debug!("Watch Event: {:#?}", event);
match &event.kind {
notify::event::EventKind::Remove(_) if!event.paths.is_empty() => {
info!("identified removal: {:?}", &event.paths);
}
_ => {
debug!("watch event"; "event" =>?event);
}
}
}
fn add_path(&mut self, path: PathBuf) -> Result<(), notify::Error> {
if!self.watches.contains(&path) {
debug!("watching path"; "path" => path.to_str());
self.notify.watch(&path, RecursiveMode::NonRecursive)?;
self.watches.insert(path.clone());
}
if let Some(parent) = path.parent() {
if!self.watches.contains(parent) {
debug!("watching parent path"; "parent_path" => parent.to_str());
self.notify.watch(&parent, RecursiveMode::NonRecursive)?;
}
}
Ok(())
}
fn path_is_interesting(&self, path: &PathBuf, kind: &EventKind) -> bool {
path_match(&self.watches, path)
&& match kind {
// We ignore metadata modification events for the profiles directory
// tree as it is a symlink forest that is used to keep track of
// channels and nix will uconditionally update the metadata of each
// link in this forest. See https://github.com/NixOS/nix/blob/629b9b0049363e091b76b7f60a8357d9f94733cc/src/libstore/local-store.cc#L74-L80
// for the unconditional update. These metadata modification events are
// spurious annd they can easily cause a rebuild-loop when a shell.nix
// file does not pin its version of nixpkgs or other channels. When
// a Nix channel is updated we receive many other types of events, so
// ignoring these metadata modifications will not impact lorri's
// ability to correctly watch for channel changes.
EventKind::Modify(ModifyKind::Metadata(_)) => {
if path.starts_with(Path::new("/nix/var/nix/profiles/per-user")) {
debug!("ignoring spurious metadata change event within the profiles dir"; "path" => path.to_str());
false
} else {
true
}
}
_ => true,
}
}
}
/// Lists the dirs and files in a directory, as two vectors.
/// Given path must be a readable directory.
fn list_dir(dir: &Path) -> Result<(Vec<PathBuf>, Vec<PathBuf>), std::io::Error> {
let mut dirs = vec![];
let mut files = vec![];
for entry in std::fs::read_dir(dir)? {
let entry = entry?;
if entry.file_type()?.is_dir() {
dirs.push(entry.path())
} else {
files.push(entry.path())
}
}
Ok((dirs, files))
}
/// List all children of the given path.
/// Recurses into directories.
///
/// Returns the given path first, then a topologically sorted list of children, if any.
///
/// All files have to be readable, or the function aborts.
/// TODO: gracefully skip unreadable files.
fn walk_path_topo(path: PathBuf) -> Result<Vec<PathBuf>, std::io::Error> {
// push our own path first
let mut res = vec![path.clone()];
// nothing to list
if!path.is_dir() {
return Ok(res);
}
let (dirs, mut files) = list_dir(&path)?;
// plain files
res.append(&mut files);
// now to go through the list, appending new
// directories to the work queue as you find them.
let mut work = std::collections::VecDeque::from(dirs);
loop {
match work.pop_front() {
// no directories remaining
None => break,
Some(dir) => {
res.push(dir.clone());
let (dirs, mut files) = list_dir(&dir)?;
res.append(&mut files);
work.append(&mut std::collections::VecDeque::from(dirs));
}
}
}
Ok(res)
}
/// Determine if the event path is covered by our list of watched
/// paths.
///
/// Returns true if:
/// - the event's path directly names a path in our
/// watch list
/// - the event's path names a canonicalized path in our watch list
/// - the event's path's parent directly names a path in our watch
/// list
/// - the event's path's parent names a canonicalized path in our
/// watch list
fn path_match(watched_paths: &HashSet<PathBuf>, event_path: &Path) -> bool {
let event_parent = event_path.parent();
let matches = |watched: &Path| {
if event_path == watched {
debug!(
"event path directly matches watched path";
"event_path" => event_path.to_str());
return true;
}
if let Some(parent) = event_parent {
|
false
};
watched_paths.iter().any(|watched| {
if matches(watched) {
return true;
}
if let Ok(canonicalized_watch) = watched.canonicalize() {
if matches(&canonicalized_watch) {
return true;
}
}
false
})
}
#[cfg(test)]
mod tests {
use super::{EventError, Reason, Watch};
use crate::bash::expect_bash;
use std::path::PathBuf;
use std::thread::sleep;
use std::time::Duration;
use tempfile::tempdir;
/// upper bound of watcher (if it’s hit, something is broken)
fn upper_watcher_timeout() -> Duration {
// CI machines are very slow sometimes.
Duration::from_millis(1000)
}
/// Collect all notifications
fn process_all(watch: &Watch) -> Vec<Option<Result<Reason, EventError>>> {
watch.rx.try_iter().map(|e| watch.process(e)).collect()
}
/// Returns true iff the given file has changed
fn file_changed(watch: &Watch, file_name: &str) -> (bool, Vec<Reason>) {
let mut reasons = Vec::new();
let mut changed = false;
for event in process_all(watch) {
if let Some(Ok(reason)) = event {
reasons.push(reason.clone());
if let Reason::FilesChanged(files) = reason {
changed = changed
|| files
.iter()
.map(|p| p.file_name())
.filter(|f| f.is_some())
.map(|f| f.unwrap())
.any(|f| f == file_name)
}
}
}
(changed, reasons)
}
fn assert_file_changed(watch: &Watch, file_name: &str) {
let (file_changed, events) = file_changed(watch, file_name);
assert!(
file_changed,
"no file change notification for '{}'; these events occurred instead: {:?}",
file_name, events
);
}
/// Returns true iff there were no changes
fn no_changes(watch: &Watch) -> bool {
process_all(watch).iter().filter(|e| e.is_some()).count() == 0
}
#[cfg(target_os = "macos")]
fn macos_eat_late_notifications(watcher: &mut Watch) {
// Sometimes a brand new watch will send a CREATE notification
// for a file which was just created, even if the watch was
// created after the file was made.
//
// Our tests want to be very precise about which events are
// received when, so expect these initial events and swallow
// them.
//
// Note, this is racey in the kernel. Otherwise I'd assert
// this is empty.
sleep(upper_watcher_timeout());
process_all(watcher).is_empty();
}
#[cfg(not(target_os = "macos"))]
fn macos_eat_late_notifications(watcher: &mut Watch) {
// If we're supposedly dealing with a late notification on
// macOS, we'd better not receive any messages on other
// platforms.
//
// If we do receive any notifications, our test is broken.
sleep(upper_watcher_timeout());
assert!(process_all(watcher).is_empty());
}
#[test]
fn trivial_watch_whole_directory() {
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().to_path_buf()]).unwrap();
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
expect_bash(r#"echo 1 > "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn trivial_watch_specific_file() {
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().join("foo")]).unwrap();
macos_eat_late_notifications(&mut watcher);
expect_bash(r#"echo 1 > "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn rename_over_vim() {
// Vim renames files in to place for atomic writes
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().join("foo")]).unwrap();
macos_eat_late_notifications(&mut watcher);
// bar is not watched, expect error
expect_bash(r#"echo 1 > "$1/bar""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert!(no_changes(&watcher));
// Rename bar to foo, expect a notification
expect_bash(r#"mv "$1/bar" "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
// Do it a second time
expect_bash(r#"echo 1 > "$1/bar""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert!(no_changes(&watcher));
// Rename bar to foo, expect a notification
expect_bash(r#"mv "$1/bar" "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn walk_path_topo_filetree() -> std::io::Result<()> {
let temp = tempdir().unwrap();
let files = vec![("a", "b"), ("a", "c"), ("a/d", "e"), ("x/y", "z")];
for (dir, file) in files {
std::fs::create_dir_all(temp.path().join(dir))?;
std::fs::write(temp.path().join(dir).join(file), [])?;
}
let res = super::walk_path_topo(temp.path().to_owned())?;
// check that the list is topolocially sorted
// by making sure *no* later path is a prefix of a previous path.
let mut inv = res.clone();
inv.reverse();
for i in 0..inv.len() {
for predecessor in inv.iter().skip(i + 1) {
assert!(
!predecessor.starts_with(&inv[i]),
"{:?} is a prefix of {:?}, even though it comes later in list, thus topological order is not given!\nFull list: {:#?}",
inv[i], predecessor, res
)
}
}
// make sure the resulting list contains the same
// paths as the original list.
let mut res2 = res.clone();
res2.sort();
let mut all_paths = vec![
// our given path first
"", "a", // direct files come before nested directories
"a/b", "a/c", "x", "a/d", "a/d/e", "x/y", "x/y/z",
]
.iter()
.map(|p| temp.path().join(p).to_owned())
.collect::<Vec<_>>();
all_paths.sort();
assert_eq!(res2, all_paths);
Ok(())
}
#[test]
fn extend_filter() {
let nix = PathBuf::from("/nix/store/njlavpa90laywf22b1myif5101qhln8r-hello-2.10");
match super::Watch::extend_filter(nix.clone()) {
Ok(path) => assert!(false, "{:?} should be filtered!", path),
Err(super::FilteredOut { path, reason }) => {
drop(reason);
assert_eq!(path, nix)
}
}
let other = PathBuf::from("/home/foo/project/foobar.nix");
assert_eq!(super::Watch::extend_filter(other.clone()), Ok(other));
}
}
|
if parent == watched {
debug!(
"event path parent matches watched path";
"event_path" => event_path.to_str(), "parent_path" => parent.to_str());
return true;
}
}
|
conditional_block
|
watch.rs
|
//! Recursively watch paths for changes, in an extensible and
//! cross-platform way.
use crate::NixFile;
use crossbeam_channel as chan;
use notify::event::ModifyKind;
use notify::{EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use slog_scope::{debug, info};
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::time::Duration;
/// A dynamic list of paths to watch for changes, and
/// react to changes when they occur.
pub struct Watch {
    /// Event receiver. Process using `Watch::process`.
    pub rx: chan::Receiver<notify::Result<notify::Event>>,
    // Platform file notifier; events it produces arrive on `rx`.
    notify: RecommendedWatcher,
    // Paths already registered with the notifier. Used to avoid duplicate
    // watches (`add_path`) and to match incoming event paths (`path_match`).
    watches: HashSet<PathBuf>,
}
/// A debug message string that can only be displayed via `Debug`.
#[derive(Clone, Debug, Serialize)]
pub struct DebugMessage(pub String);
/// A path that was rejected from the watch list, along with the reason
/// it was filtered out (see `Watch::extend_filter`).
#[derive(Debug, PartialEq, Eq)]
struct FilteredOut<'a> {
    // Human-readable reason why the path was not watched.
    reason: &'a str,
    // The path that was filtered out.
    path: PathBuf,
}
/// Description of the project change that triggered a build.
#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum Reason {
/// When a project is presented to Lorri to track, it's built for this reason.
ProjectAdded(NixFile),
/// When a ping is received.
PingReceived,
/// When there is a filesystem change, the first changed file is recorded,
/// along with a count of other filesystem events.
FilesChanged(Vec<PathBuf>),
/// When the underlying notifier reports something strange.
UnknownEvent(DebugMessage),
}
/// We weren’t able to understand a `notify::Event`.
#[derive(Clone, Debug)]
pub enum EventError {
/// No message was received from the raw event channel
RxNoEventReceived,
/// The changed file event had no file path
EventHasNoFilePath(notify::Event),
}
impl Watch {
    /// Instantiate a new Watch.
    ///
    /// Creates the platform file notifier with a 100 ms debounce window
    /// and an unbounded channel on which events are delivered.
    pub fn try_new() -> Result<Watch, notify::Error> {
        let (tx, rx) = chan::unbounded();
        Ok(Watch {
            notify: Watcher::new(tx, Duration::from_millis(100))?,
            watches: HashSet::new(),
            rx,
        })
    }
    /// Process `notify::Event`s coming in via `Watch::rx`.
    ///
    /// `None` if there were no relevant changes.
    pub fn process(
        &self,
        event: notify::Result<notify::Event>,
    ) -> Option<Result<Reason, EventError>> {
        match event {
            // NOTE(review): a notifier error aborts the whole process;
            // callers cannot recover from it.
            Err(err) => panic!("notify error: {}", err),
            Ok(event) => {
                self.log_event(&event);
                if event.paths.is_empty() {
                    Some(Err(EventError::EventHasNoFilePath(event)))
                } else {
                    let notify::Event { paths, kind, .. } = event;
                    // Keep only paths that are watched and whose event kind
                    // is not filtered out (see `path_is_interesting`).
                    let interesting_paths: Vec<PathBuf> = paths
                        .into_iter()
                        .filter(|p| self.path_is_interesting(p, &kind))
                        .collect();
                    if !interesting_paths.is_empty() {
                        Some(Ok(Reason::FilesChanged(interesting_paths)))
                    } else {
                        None
                    }
                }
            }
        }
    }
    /// Extend the watch list with an additional list of paths.
    /// Note: Watch maintains a list of already watched paths, and
    /// will not add duplicates.
    ///
    /// Each given path is walked recursively (`walk_path_topo`),
    /// canonicalized, filtered (`extend_filter`) and then watched.
    pub fn extend(&mut self, paths: Vec<PathBuf>) -> Result<(), notify::Error> {
        for path in paths {
            let recursive_paths = walk_path_topo(path)?;
            for p in recursive_paths {
                let p = p.canonicalize()?;
                match Self::extend_filter(p) {
                    Err(FilteredOut { reason, path }) => {
                        debug!("Skipping watching {}: {}", path.display(), reason)
                    }
                    Ok(p) => {
                        self.add_path(p)?;
                    }
                }
            }
        }
        Ok(())
    }
    /// Decide whether a path should be watched at all.
    ///
    /// Paths under `/nix/store` are rejected: store entries are immutable,
    /// so watching them can never produce a meaningful change event.
    fn extend_filter(path: PathBuf) -> Result<PathBuf, FilteredOut<'static>> {
        if path.starts_with(Path::new("/nix/store")) {
            Err(FilteredOut {
                path,
                reason: "starts with /nix/store",
            })
        } else {
            Ok(path)
        }
    }
    /// Log the given event; removals are promoted to info level.
    fn log_event(&self, event: &notify::Event) {
        debug!("Watch Event: {:#?}", event);
        match &event.kind {
            notify::event::EventKind::Remove(_) if !event.paths.is_empty() => {
                info!("identified removal: {:?}", &event.paths);
            }
            _ => {
                debug!("watch event"; "event" => ?event);
            }
        }
    }
    /// Register a path (and its parent directory) with the notifier.
    ///
    /// The parent is watched as well so that rename-into-place writes
    /// (e.g. vim's atomic saves) on a watched file are still observed.
    fn add_path(&mut self, path: PathBuf) -> Result<(), notify::Error> {
        if !self.watches.contains(&path) {
            debug!("watching path"; "path" => path.to_str());
            self.notify.watch(&path, RecursiveMode::NonRecursive)?;
            self.watches.insert(path.clone());
        }
        if let Some(parent) = path.parent() {
            if !self.watches.contains(parent) {
                debug!("watching parent path"; "parent_path" => parent.to_str());
                self.notify.watch(&parent, RecursiveMode::NonRecursive)?;
            }
        }
        Ok(())
    }
    /// Decide whether an event on `path` with kind `kind` is relevant.
    fn path_is_interesting(&self, path: &PathBuf, kind: &EventKind) -> bool {
        path_match(&self.watches, path)
            && match kind {
                // We ignore metadata modification events for the profiles directory
                // tree as it is a symlink forest that is used to keep track of
                // channels and nix will unconditionally update the metadata of each
                // link in this forest. See https://github.com/NixOS/nix/blob/629b9b0049363e091b76b7f60a8357d9f94733cc/src/libstore/local-store.cc#L74-L80
                // for the unconditional update. These metadata modification events are
                // spurious and they can easily cause a rebuild-loop when a shell.nix
                // file does not pin its version of nixpkgs or other channels. When
                // a Nix channel is updated we receive many other types of events, so
                // ignoring these metadata modifications will not impact lorri's
                // ability to correctly watch for channel changes.
                EventKind::Modify(ModifyKind::Metadata(_)) => {
                    if path.starts_with(Path::new("/nix/var/nix/profiles/per-user")) {
                        debug!("ignoring spurious metadata change event within the profiles dir"; "path" => path.to_str());
                        false
                    } else {
                        true
                    }
                }
                _ => true,
            }
    }
}
/// Lists the dirs and files in a directory, as two vectors.
/// Given path must be a readable directory.
fn list_dir(dir: &Path) -> Result<(Vec<PathBuf>, Vec<PathBuf>), std::io::Error> {
    let mut dirs = Vec::new();
    let mut files = Vec::new();
    for entry in std::fs::read_dir(dir)? {
        let entry = entry?;
        // Route each entry into the matching bucket.
        let bucket = if entry.file_type()?.is_dir() {
            &mut dirs
        } else {
            &mut files
        };
        bucket.push(entry.path());
    }
    Ok((dirs, files))
}
/// List all children of the given path.
/// Recurses into directories.
///
/// Returns the given path first, then a topologically sorted list of children, if any.
///
/// All files have to be readable, or the function aborts.
/// TODO: gracefully skip unreadable files.
fn walk_path_topo(path: PathBuf) -> Result<Vec<PathBuf>, std::io::Error> {
    // The requested path always comes first.
    let mut res = vec![path.clone()];
    // A non-directory has no children to list.
    if !path.is_dir() {
        return Ok(res);
    }
    let (dirs, mut files) = list_dir(&path)?;
    // Direct files come before any nested directory contents.
    res.append(&mut files);
    // Breadth-first worklist of directories still to be expanded;
    // pushing children to the back keeps the output topologically sorted.
    let mut work: std::collections::VecDeque<PathBuf> = dirs.into();
    while let Some(dir) = work.pop_front() {
        res.push(dir.clone());
        let (subdirs, mut subfiles) = list_dir(&dir)?;
        res.append(&mut subfiles);
        work.extend(subdirs);
    }
    Ok(res)
}
/// Determine if the event path is covered by our list of watched
/// paths.
///
/// Returns true if:
/// - the event's path directly names a path in our
/// watch list
/// - the event's path names a canonicalized path in our watch list
/// - the event's path's parent directly names a path in our watch
/// list
/// - the event's path's parent names a canonicalized path in our
/// watch list
fn path_match(watched_paths: &HashSet<PathBuf>, event_path: &Path) -> bool {
let event_parent = event_path.parent();
let matches = |watched: &Path| {
if event_path == watched {
debug!(
"event path directly matches watched path";
"event_path" => event_path.to_str());
return true;
}
if let Some(parent) = event_parent {
if parent == watched {
debug!(
"event path parent matches watched path";
"event_path" => event_path.to_str(), "parent_path" => parent.to_str());
return true;
}
}
false
};
watched_paths.iter().any(|watched| {
if matches(watched) {
return true;
}
if let Ok(canonicalized_watch) = watched.canonicalize() {
if matches(&canonicalized_watch) {
return true;
}
}
false
})
}
#[cfg(test)]
mod tests {
use super::{EventError, Reason, Watch};
use crate::bash::expect_bash;
use std::path::PathBuf;
use std::thread::sleep;
use std::time::Duration;
use tempfile::tempdir;
/// upper bound of watcher (if it’s hit, something is broken)
fn upper_watcher_timeout() -> Duration {
// CI machines are very slow sometimes.
Duration::from_millis(1000)
}
/// Collect all notifications
fn process_all(watch: &Watch) -> Vec<Option<Result<Reason, EventError>>> {
watch.rx.try_iter().map(|e| watch.process(e)).collect()
}
/// Returns true iff the given file has changed
fn file_changed(watch: &Watch, file_name: &str) -> (bool, Vec<Reason>) {
let mut reasons = Vec::new();
let mut changed = false;
for event in process_all(watch) {
if let Some(Ok(reason)) = event {
reasons.push(reason.clone());
if let Reason::FilesChanged(files) = reason {
changed = changed
|| files
.iter()
.map(|p| p.file_name())
.filter(|f| f.is_some())
.map(|f| f.unwrap())
.any(|f| f == file_name)
}
}
}
(changed, reasons)
}
fn assert_file_changed(watch: &Watch, file_name: &str) {
let (file_changed, events) = file_changed(watch, file_name);
assert!(
file_changed,
"no file change notification for '{}'; these events occurred instead: {:?}",
file_name, events
);
}
/// Returns true iff there were no changes
fn no_changes(watch: &Watch) -> bool {
process_all(watch).iter().filter(|e| e.is_some()).count() == 0
}
#[cfg(target_os = "macos")]
fn macos_eat_late_notifications(watcher: &mut Watch) {
// Sometimes a brand new watch will send a CREATE notification
// for a file which was just created, even if the watch was
// created after the file was made.
//
// Our tests want to be very precise about which events are
// received when, so expect these initial events and swallow
// them.
//
// Note, this is racey in the kernel. Otherwise I'd assert
// this is empty.
sleep(upper_watcher_timeout());
process_all(watcher).is_empty();
}
#[cfg(not(target_os = "macos"))]
fn macos_eat_late_notifications(watcher: &mut Watch) {
// If we're supposedly dealing with a late notification on
// macOS, we'd better not receive any messages on other
// platforms.
//
// If we do receive any notifications, our test is broken.
sleep(upper_watcher_timeout());
assert!(process_all(watcher).is_empty());
}
#[test]
fn trivial_watch_whole_directory() {
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().to_path_buf()]).unwrap();
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
expect_bash(r#"echo 1 > "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn trivial_watch_specific_file() {
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().join("foo")]).unwrap();
macos_eat_late_notifications(&mut watcher);
expect_bash(r#"echo 1 > "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn rename_over_vim() {
// Vim renames files in to place for atomic writes
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().join("foo")]).unwrap();
|
macos_eat_late_notifications(&mut watcher);
// bar is not watched, expect error
expect_bash(r#"echo 1 > "$1/bar""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert!(no_changes(&watcher));
// Rename bar to foo, expect a notification
expect_bash(r#"mv "$1/bar" "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
// Do it a second time
expect_bash(r#"echo 1 > "$1/bar""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert!(no_changes(&watcher));
// Rename bar to foo, expect a notification
expect_bash(r#"mv "$1/bar" "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn walk_path_topo_filetree() -> std::io::Result<()> {
let temp = tempdir().unwrap();
let files = vec![("a", "b"), ("a", "c"), ("a/d", "e"), ("x/y", "z")];
for (dir, file) in files {
std::fs::create_dir_all(temp.path().join(dir))?;
std::fs::write(temp.path().join(dir).join(file), [])?;
}
let res = super::walk_path_topo(temp.path().to_owned())?;
// check that the list is topolocially sorted
// by making sure *no* later path is a prefix of a previous path.
let mut inv = res.clone();
inv.reverse();
for i in 0..inv.len() {
for predecessor in inv.iter().skip(i + 1) {
assert!(
!predecessor.starts_with(&inv[i]),
"{:?} is a prefix of {:?}, even though it comes later in list, thus topological order is not given!\nFull list: {:#?}",
inv[i], predecessor, res
)
}
}
// make sure the resulting list contains the same
// paths as the original list.
let mut res2 = res.clone();
res2.sort();
let mut all_paths = vec![
// our given path first
"", "a", // direct files come before nested directories
"a/b", "a/c", "x", "a/d", "a/d/e", "x/y", "x/y/z",
]
.iter()
.map(|p| temp.path().join(p).to_owned())
.collect::<Vec<_>>();
all_paths.sort();
assert_eq!(res2, all_paths);
Ok(())
}
#[test]
fn extend_filter() {
let nix = PathBuf::from("/nix/store/njlavpa90laywf22b1myif5101qhln8r-hello-2.10");
match super::Watch::extend_filter(nix.clone()) {
Ok(path) => assert!(false, "{:?} should be filtered!", path),
Err(super::FilteredOut { path, reason }) => {
drop(reason);
assert_eq!(path, nix)
}
}
let other = PathBuf::from("/home/foo/project/foobar.nix");
assert_eq!(super::Watch::extend_filter(other.clone()), Ok(other));
}
}
|
random_line_split
|
|
watch.rs
|
//! Recursively watch paths for changes, in an extensible and
//! cross-platform way.
use crate::NixFile;
use crossbeam_channel as chan;
use notify::event::ModifyKind;
use notify::{EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use slog_scope::{debug, info};
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::time::Duration;
/// A dynamic list of paths to watch for changes, and
/// react to changes when they occur.
pub struct Watch {
/// Event receiver. Process using `Watch::process`.
pub rx: chan::Receiver<notify::Result<notify::Event>>,
notify: RecommendedWatcher,
watches: HashSet<PathBuf>,
}
/// A debug message string that can only be displayed via `Debug`.
#[derive(Clone, Debug, Serialize)]
pub struct DebugMessage(pub String);
#[derive(Debug, PartialEq, Eq)]
struct FilteredOut<'a> {
reason: &'a str,
path: PathBuf,
}
/// Description of the project change that triggered a build.
#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum Reason {
/// When a project is presented to Lorri to track, it's built for this reason.
ProjectAdded(NixFile),
/// When a ping is received.
PingReceived,
/// When there is a filesystem change, the first changed file is recorded,
/// along with a count of other filesystem events.
FilesChanged(Vec<PathBuf>),
/// When the underlying notifier reports something strange.
UnknownEvent(DebugMessage),
}
/// We weren’t able to understand a `notify::Event`.
#[derive(Clone, Debug)]
pub enum EventError {
/// No message was received from the raw event channel
RxNoEventReceived,
/// The changed file event had no file path
EventHasNoFilePath(notify::Event),
}
impl Watch {
/// Instantiate a new Watch.
pub fn try_new() -> Result<Watch, notify::Error> {
let (tx, rx) = chan::unbounded();
Ok(Watch {
notify: Watcher::new(tx, Duration::from_millis(100))?,
watches: HashSet::new(),
rx,
})
}
/// Process `notify::Event`s coming in via `Watch::rx`.
///
/// `None` if there were no relevant changes.
pub fn process(
&self,
event: notify::Result<notify::Event>,
) -> Option<Result<Reason, EventError>> {
match event {
Err(err) => panic!("notify error: {}", err),
Ok(event) => {
self.log_event(&event);
if event.paths.is_empty() {
Some(Err(EventError::EventHasNoFilePath(event)))
} else {
let notify::Event { paths, kind,.. } = event;
let interesting_paths: Vec<PathBuf> = paths
.into_iter()
.filter(|p| self.path_is_interesting(p, &kind))
.collect();
if!interesting_paths.is_empty() {
Some(Ok(Reason::FilesChanged(interesting_paths)))
} else {
None
}
}
}
}
}
/// Extend the watch list with an additional list of paths.
/// Note: Watch maintains a list of already watched paths, and
/// will not add duplicates.
pub fn extend(&mut self, paths: Vec<PathBuf>) -> Result<(), notify::Error> {
for path in paths {
let recursive_paths = walk_path_topo(path)?;
for p in recursive_paths {
let p = p.canonicalize()?;
match Self::extend_filter(p) {
Err(FilteredOut { reason, path }) => {
debug!("Skipping watching {}: {}", path.display(), reason)
}
Ok(p) => {
self.add_path(p)?;
}
}
}
}
Ok(())
}
fn ex
|
ath: PathBuf) -> Result<PathBuf, FilteredOut<'static>> {
if path.starts_with(Path::new("/nix/store")) {
Err(FilteredOut {
path,
reason: "starts with /nix/store",
})
} else {
Ok(path)
}
}
fn log_event(&self, event: ¬ify::Event) {
debug!("Watch Event: {:#?}", event);
match &event.kind {
notify::event::EventKind::Remove(_) if!event.paths.is_empty() => {
info!("identified removal: {:?}", &event.paths);
}
_ => {
debug!("watch event"; "event" =>?event);
}
}
}
fn add_path(&mut self, path: PathBuf) -> Result<(), notify::Error> {
if!self.watches.contains(&path) {
debug!("watching path"; "path" => path.to_str());
self.notify.watch(&path, RecursiveMode::NonRecursive)?;
self.watches.insert(path.clone());
}
if let Some(parent) = path.parent() {
if!self.watches.contains(parent) {
debug!("watching parent path"; "parent_path" => parent.to_str());
self.notify.watch(&parent, RecursiveMode::NonRecursive)?;
}
}
Ok(())
}
fn path_is_interesting(&self, path: &PathBuf, kind: &EventKind) -> bool {
path_match(&self.watches, path)
&& match kind {
// We ignore metadata modification events for the profiles directory
// tree as it is a symlink forest that is used to keep track of
// channels and nix will uconditionally update the metadata of each
// link in this forest. See https://github.com/NixOS/nix/blob/629b9b0049363e091b76b7f60a8357d9f94733cc/src/libstore/local-store.cc#L74-L80
// for the unconditional update. These metadata modification events are
// spurious annd they can easily cause a rebuild-loop when a shell.nix
// file does not pin its version of nixpkgs or other channels. When
// a Nix channel is updated we receive many other types of events, so
// ignoring these metadata modifications will not impact lorri's
// ability to correctly watch for channel changes.
EventKind::Modify(ModifyKind::Metadata(_)) => {
if path.starts_with(Path::new("/nix/var/nix/profiles/per-user")) {
debug!("ignoring spurious metadata change event within the profiles dir"; "path" => path.to_str());
false
} else {
true
}
}
_ => true,
}
}
}
/// Lists the dirs and files in a directory, as two vectors.
/// Given path must be a readable directory.
fn list_dir(dir: &Path) -> Result<(Vec<PathBuf>, Vec<PathBuf>), std::io::Error> {
let mut dirs = vec![];
let mut files = vec![];
for entry in std::fs::read_dir(dir)? {
let entry = entry?;
if entry.file_type()?.is_dir() {
dirs.push(entry.path())
} else {
files.push(entry.path())
}
}
Ok((dirs, files))
}
/// List all children of the given path.
/// Recurses into directories.
///
/// Returns the given path first, then a topologically sorted list of children, if any.
///
/// All files have to be readable, or the function aborts.
/// TODO: gracefully skip unreadable files.
fn walk_path_topo(path: PathBuf) -> Result<Vec<PathBuf>, std::io::Error> {
// push our own path first
let mut res = vec![path.clone()];
// nothing to list
if!path.is_dir() {
return Ok(res);
}
let (dirs, mut files) = list_dir(&path)?;
// plain files
res.append(&mut files);
// now to go through the list, appending new
// directories to the work queue as you find them.
let mut work = std::collections::VecDeque::from(dirs);
loop {
match work.pop_front() {
// no directories remaining
None => break,
Some(dir) => {
res.push(dir.clone());
let (dirs, mut files) = list_dir(&dir)?;
res.append(&mut files);
work.append(&mut std::collections::VecDeque::from(dirs));
}
}
}
Ok(res)
}
/// Determine if the event path is covered by our list of watched
/// paths.
///
/// Returns true if:
/// - the event's path directly names a path in our
/// watch list
/// - the event's path names a canonicalized path in our watch list
/// - the event's path's parent directly names a path in our watch
/// list
/// - the event's path's parent names a canonicalized path in our
/// watch list
fn path_match(watched_paths: &HashSet<PathBuf>, event_path: &Path) -> bool {
let event_parent = event_path.parent();
let matches = |watched: &Path| {
if event_path == watched {
debug!(
"event path directly matches watched path";
"event_path" => event_path.to_str());
return true;
}
if let Some(parent) = event_parent {
if parent == watched {
debug!(
"event path parent matches watched path";
"event_path" => event_path.to_str(), "parent_path" => parent.to_str());
return true;
}
}
false
};
watched_paths.iter().any(|watched| {
if matches(watched) {
return true;
}
if let Ok(canonicalized_watch) = watched.canonicalize() {
if matches(&canonicalized_watch) {
return true;
}
}
false
})
}
#[cfg(test)]
mod tests {
use super::{EventError, Reason, Watch};
use crate::bash::expect_bash;
use std::path::PathBuf;
use std::thread::sleep;
use std::time::Duration;
use tempfile::tempdir;
/// upper bound of watcher (if it’s hit, something is broken)
fn upper_watcher_timeout() -> Duration {
// CI machines are very slow sometimes.
Duration::from_millis(1000)
}
/// Collect all notifications
fn process_all(watch: &Watch) -> Vec<Option<Result<Reason, EventError>>> {
watch.rx.try_iter().map(|e| watch.process(e)).collect()
}
/// Returns true iff the given file has changed
fn file_changed(watch: &Watch, file_name: &str) -> (bool, Vec<Reason>) {
let mut reasons = Vec::new();
let mut changed = false;
for event in process_all(watch) {
if let Some(Ok(reason)) = event {
reasons.push(reason.clone());
if let Reason::FilesChanged(files) = reason {
changed = changed
|| files
.iter()
.map(|p| p.file_name())
.filter(|f| f.is_some())
.map(|f| f.unwrap())
.any(|f| f == file_name)
}
}
}
(changed, reasons)
}
fn assert_file_changed(watch: &Watch, file_name: &str) {
let (file_changed, events) = file_changed(watch, file_name);
assert!(
file_changed,
"no file change notification for '{}'; these events occurred instead: {:?}",
file_name, events
);
}
/// Returns true iff there were no changes
fn no_changes(watch: &Watch) -> bool {
process_all(watch).iter().filter(|e| e.is_some()).count() == 0
}
#[cfg(target_os = "macos")]
fn macos_eat_late_notifications(watcher: &mut Watch) {
// Sometimes a brand new watch will send a CREATE notification
// for a file which was just created, even if the watch was
// created after the file was made.
//
// Our tests want to be very precise about which events are
// received when, so expect these initial events and swallow
// them.
//
// Note, this is racey in the kernel. Otherwise I'd assert
// this is empty.
sleep(upper_watcher_timeout());
process_all(watcher).is_empty();
}
#[cfg(not(target_os = "macos"))]
fn macos_eat_late_notifications(watcher: &mut Watch) {
// If we're supposedly dealing with a late notification on
// macOS, we'd better not receive any messages on other
// platforms.
//
// If we do receive any notifications, our test is broken.
sleep(upper_watcher_timeout());
assert!(process_all(watcher).is_empty());
}
#[test]
fn trivial_watch_whole_directory() {
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().to_path_buf()]).unwrap();
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
expect_bash(r#"echo 1 > "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn trivial_watch_specific_file() {
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().join("foo")]).unwrap();
macos_eat_late_notifications(&mut watcher);
expect_bash(r#"echo 1 > "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn rename_over_vim() {
// Vim renames files in to place for atomic writes
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().join("foo")]).unwrap();
macos_eat_late_notifications(&mut watcher);
// bar is not watched, expect error
expect_bash(r#"echo 1 > "$1/bar""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert!(no_changes(&watcher));
// Rename bar to foo, expect a notification
expect_bash(r#"mv "$1/bar" "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
// Do it a second time
expect_bash(r#"echo 1 > "$1/bar""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert!(no_changes(&watcher));
// Rename bar to foo, expect a notification
expect_bash(r#"mv "$1/bar" "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn walk_path_topo_filetree() -> std::io::Result<()> {
let temp = tempdir().unwrap();
let files = vec![("a", "b"), ("a", "c"), ("a/d", "e"), ("x/y", "z")];
for (dir, file) in files {
std::fs::create_dir_all(temp.path().join(dir))?;
std::fs::write(temp.path().join(dir).join(file), [])?;
}
let res = super::walk_path_topo(temp.path().to_owned())?;
// check that the list is topolocially sorted
// by making sure *no* later path is a prefix of a previous path.
let mut inv = res.clone();
inv.reverse();
for i in 0..inv.len() {
for predecessor in inv.iter().skip(i + 1) {
assert!(
!predecessor.starts_with(&inv[i]),
"{:?} is a prefix of {:?}, even though it comes later in list, thus topological order is not given!\nFull list: {:#?}",
inv[i], predecessor, res
)
}
}
// make sure the resulting list contains the same
// paths as the original list.
let mut res2 = res.clone();
res2.sort();
let mut all_paths = vec![
// our given path first
"", "a", // direct files come before nested directories
"a/b", "a/c", "x", "a/d", "a/d/e", "x/y", "x/y/z",
]
.iter()
.map(|p| temp.path().join(p).to_owned())
.collect::<Vec<_>>();
all_paths.sort();
assert_eq!(res2, all_paths);
Ok(())
}
#[test]
fn extend_filter() {
let nix = PathBuf::from("/nix/store/njlavpa90laywf22b1myif5101qhln8r-hello-2.10");
match super::Watch::extend_filter(nix.clone()) {
Ok(path) => assert!(false, "{:?} should be filtered!", path),
Err(super::FilteredOut { path, reason }) => {
drop(reason);
assert_eq!(path, nix)
}
}
let other = PathBuf::from("/home/foo/project/foobar.nix");
assert_eq!(super::Watch::extend_filter(other.clone()), Ok(other));
}
}
|
tend_filter(p
|
identifier_name
|
lib.rs
|
//! *Library for encrypting and decrypting age files*
//!
//! This crate implements file encryption according to the [age-encryption.org/v1]
//! specification. It generates and consumes encrypted files that are compatible with the
//! [rage] CLI tool, as well as the reference [Go] implementation.
//!
//! The encryption and decryption APIs are provided by [`Encryptor`] and [`Decryptor`].
//! There are several ways to use these:
//! - For most cases (including programmatic usage), use [`Encryptor::with_recipients`]
//! with [`x25519::Recipient`], and [`Decryptor`] with [`x25519::Identity`].
//! - APIs are available for passphrase-based encryption and decryption. These should
//! only be used with passphrases that were provided by (or generated for) a human.
//! - For compatibility with existing SSH keys, enable the `ssh` feature flag, and use
//! [`ssh::Recipient`] and [`ssh::Identity`].
//!
//! Age-encrypted files are binary and non-malleable. To encode them as text, use the
//! wrapping readers and writers in the [`armor`] module, behind the `armor` feature flag.
//!
//! *Caution*: all crate versions prior to 1.0 are beta releases for **testing purposes
//! only**.
//!
//! [age-encryption.org/v1]: https://age-encryption.org/v1
//! [rage]: https://crates.io/crates/rage
//! [Go]: https://filippo.io/age
//!
//! # Examples
//!
//! ## Recipient-based encryption
//!
//! ```
//! use std::io::{Read, Write};
//! use std::iter;
//!
//! # fn run_main() -> Result<(), ()> {
//! let key = age::x25519::Identity::generate();
//! let pubkey = key.to_public();
//!
//! let plaintext = b"Hello world!";
//!
//! // Encrypt the plaintext to a ciphertext...
//! # fn encrypt(pubkey: age::x25519::Recipient, plaintext: &[u8]) -> Result<Vec<u8>, age::EncryptError> {
//! let encrypted = {
//! let encryptor = age::Encryptor::with_recipients(vec![Box::new(pubkey)])
//! .expect("we provided a recipient");
//!
//! let mut encrypted = vec![];
//! let mut writer = encryptor.wrap_output(&mut encrypted)?;
//! writer.write_all(plaintext)?;
//! writer.finish()?;
//!
//! encrypted
//! };
//! # Ok(encrypted)
//! # }
//!
//! //... and decrypt the obtained ciphertext to the plaintext again.
//! # fn decrypt(key: age::x25519::Identity, encrypted: Vec<u8>) -> Result<Vec<u8>, age::DecryptError> {
//! let decrypted = {
//! let decryptor = match age::Decryptor::new(&encrypted[..])? {
//! age::Decryptor::Recipients(d) => d,
//! _ => unreachable!(),
//! };
//!
//! let mut decrypted = vec![];
//! let mut reader = decryptor.decrypt(iter::once(&key as &dyn age::Identity))?;
//! reader.read_to_end(&mut decrypted);
//!
//! decrypted
//! };
//! # Ok(decrypted)
//! # }
//! # let decrypted = decrypt(
//! # key,
//! # encrypt(pubkey, &plaintext[..]).map_err(|_| ())?
//! # ).map_err(|_| ())?;
//!
//! assert_eq!(decrypted, plaintext);
//! # Ok(())
//! # }
//!
//! # run_main().unwrap();
//! ```
//!
//! ## Passphrase-based encryption
//!
//! ```
//! use age::secrecy::Secret;
//! use std::io::{Read, Write};
//!
//! # fn run_main() -> Result<(), ()> {
|
//! let passphrase = "this is not a good passphrase";
//!
//! // Encrypt the plaintext to a ciphertext using the passphrase...
//! # fn encrypt(passphrase: &str, plaintext: &[u8]) -> Result<Vec<u8>, age::EncryptError> {
//! let encrypted = {
//! let encryptor = age::Encryptor::with_user_passphrase(Secret::new(passphrase.to_owned()));
//!
//! let mut encrypted = vec![];
//! let mut writer = encryptor.wrap_output(&mut encrypted)?;
//! writer.write_all(plaintext)?;
//! writer.finish()?;
//!
//! encrypted
//! };
//! # Ok(encrypted)
//! # }
//!
//! //... and decrypt the ciphertext to the plaintext again using the same passphrase.
//! # fn decrypt(passphrase: &str, encrypted: Vec<u8>) -> Result<Vec<u8>, age::DecryptError> {
//! let decrypted = {
//! let decryptor = match age::Decryptor::new(&encrypted[..])? {
//! age::Decryptor::Passphrase(d) => d,
//! _ => unreachable!(),
//! };
//!
//! let mut decrypted = vec![];
//! let mut reader = decryptor.decrypt(&Secret::new(passphrase.to_owned()), None)?;
//! reader.read_to_end(&mut decrypted);
//!
//! decrypted
//! };
//! # Ok(decrypted)
//! # }
//! # let decrypted = decrypt(
//! # passphrase,
//! # encrypt(passphrase, &plaintext[..]).map_err(|_| ())?
//! # ).map_err(|_| ())?;
//!
//! assert_eq!(decrypted, plaintext);
//! # Ok(())
//! # }
//! # run_main().unwrap();
//! ```
#![cfg_attr(docsrs, feature(doc_cfg))]
#![forbid(unsafe_code)]
// Catch documentation errors caused by code changes.
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(missing_docs)]
// Re-export crates that are used in our public API.
pub use age_core::secrecy;
mod error;
mod format;
mod identity;
mod keys;
mod primitives;
mod protocol;
mod util;
pub use error::{DecryptError, EncryptError};
pub use identity::{IdentityFile, IdentityFileEntry};
pub use primitives::stream;
pub use protocol::{decryptor, Decryptor, Encryptor};
#[cfg(feature = "armor")]
pub use primitives::armor;
#[cfg(feature = "cli-common")]
#[cfg_attr(docsrs, doc(cfg(feature = "cli-common")))]
pub mod cli_common;
mod i18n;
pub use i18n::localizer;
//
// Identity types
//
pub mod encrypted;
mod scrypt;
pub mod x25519;
#[cfg(feature = "plugin")]
#[cfg_attr(docsrs, doc(cfg(feature = "plugin")))]
pub mod plugin;
#[cfg(feature = "ssh")]
#[cfg_attr(docsrs, doc(cfg(feature = "ssh")))]
pub mod ssh;
use age_core::{
format::{FileKey, Stanza},
secrecy::SecretString,
};
/// A private key or other value that can unwrap an opaque file key from a recipient
/// stanza.
pub trait Identity {
/// Attempts to unwrap the given stanza with this identity.
///
/// This method is part of the `Identity` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// identities to [`RecipientsDecryptor::decrypt`].
///
/// Returns:
/// - `Some(Ok(file_key))` on success.
/// - `Some(Err(e))` if a decryption error occurs.
/// - `None` if the recipient stanza does not match this key.
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
/// [`RecipientsDecryptor::decrypt`]: protocol::decryptor::RecipientsDecryptor::decrypt
fn unwrap_stanza(&self, stanza: &Stanza) -> Option<Result<FileKey, DecryptError>>;
/// Attempts to unwrap any of the given stanzas, which are assumed to come from the
/// same age file header, and therefore contain the same file key.
///
/// This method is part of the `Identity` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// identities to [`RecipientsDecryptor::decrypt`].
///
/// Returns:
/// - `Some(Ok(file_key))` on success.
/// - `Some(Err(e))` if a decryption error occurs.
/// - `None` if none of the recipient stanzas match this identity.
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
/// [`RecipientsDecryptor::decrypt`]: protocol::decryptor::RecipientsDecryptor::decrypt
fn unwrap_stanzas(&self, stanzas: &[Stanza]) -> Option<Result<FileKey, DecryptError>> {
stanzas.iter().find_map(|stanza| self.unwrap_stanza(stanza))
}
}
/// A public key or other value that can wrap an opaque file key to a recipient stanza.
///
/// Implementations of this trait might represent more than one recipient.
pub trait Recipient {
/// Wraps the given file key, returning stanzas to be placed in an age file header.
///
/// Implementations MUST NOT return more than one stanza per "actual recipient".
///
/// This method is part of the `Recipient` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// recipients to [`Encryptor::with_recipients`].
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
fn wrap_file_key(&self, file_key: &FileKey) -> Result<Vec<Stanza>, EncryptError>;
}
/// Callbacks that might be triggered during encryption or decryption.
///
/// Structs that implement this trait should be given directly to the individual
/// `Recipient` or `Identity` implementations that require them.
pub trait Callbacks: Clone + Send + Sync +'static {
/// Shows a message to the user.
///
/// This can be used to prompt the user to take some physical action, such as
/// inserting a hardware key.
fn display_message(&self, message: &str);
/// Requests that the user provides confirmation for some action.
///
/// This can be used to, for example, request that a hardware key the plugin wants to
/// try either be plugged in, or skipped.
///
/// - `message` is the request or call-to-action to be displayed to the user.
/// - `yes_string` and (optionally) `no_string` will be displayed on buttons or next
/// to selection options in the user's UI.
///
/// Returns:
/// - `Some(true)` if the user selected the option marked with `yes_string`.
/// - `Some(false)` if the user selected the option marked with `no_string` (or the
/// default negative confirmation label).
/// - `None` if the confirmation request could not be given to the user (for example,
/// if there is no UI for displaying messages).
fn confirm(&self, message: &str, yes_string: &str, no_string: Option<&str>) -> Option<bool>;
/// Requests non-private input from the user.
///
/// To request private inputs, use [`Callbacks::request_passphrase`].
fn request_public_string(&self, description: &str) -> Option<String>;
/// Requests a passphrase to decrypt a key.
fn request_passphrase(&self, description: &str) -> Option<SecretString>;
}
/// Helper for fuzzing the Header parser and serializer.
#[cfg(fuzzing)]
pub fn fuzz_header(data: &[u8]) {
if let Ok(header) = format::Header::read(data) {
let mut buf = Vec::with_capacity(data.len());
header.write(&mut buf).expect("can write header");
assert_eq!(&buf[..], &data[..buf.len()]);
}
}
|
//! let plaintext = b"Hello world!";
|
random_line_split
|
lib.rs
|
//! *Library for encrypting and decrypting age files*
//!
//! This crate implements file encryption according to the [age-encryption.org/v1]
//! specification. It generates and consumes encrypted files that are compatible with the
//! [rage] CLI tool, as well as the reference [Go] implementation.
//!
//! The encryption and decryption APIs are provided by [`Encryptor`] and [`Decryptor`].
//! There are several ways to use these:
//! - For most cases (including programmatic usage), use [`Encryptor::with_recipients`]
//! with [`x25519::Recipient`], and [`Decryptor`] with [`x25519::Identity`].
//! - APIs are available for passphrase-based encryption and decryption. These should
//! only be used with passphrases that were provided by (or generated for) a human.
//! - For compatibility with existing SSH keys, enable the `ssh` feature flag, and use
//! [`ssh::Recipient`] and [`ssh::Identity`].
//!
//! Age-encrypted files are binary and non-malleable. To encode them as text, use the
//! wrapping readers and writers in the [`armor`] module, behind the `armor` feature flag.
//!
//! *Caution*: all crate versions prior to 1.0 are beta releases for **testing purposes
//! only**.
//!
//! [age-encryption.org/v1]: https://age-encryption.org/v1
//! [rage]: https://crates.io/crates/rage
//! [Go]: https://filippo.io/age
//!
//! # Examples
//!
//! ## Recipient-based encryption
//!
//! ```
//! use std::io::{Read, Write};
//! use std::iter;
//!
//! # fn run_main() -> Result<(), ()> {
//! let key = age::x25519::Identity::generate();
//! let pubkey = key.to_public();
//!
//! let plaintext = b"Hello world!";
//!
//! // Encrypt the plaintext to a ciphertext...
//! # fn encrypt(pubkey: age::x25519::Recipient, plaintext: &[u8]) -> Result<Vec<u8>, age::EncryptError> {
//! let encrypted = {
//! let encryptor = age::Encryptor::with_recipients(vec![Box::new(pubkey)])
//! .expect("we provided a recipient");
//!
//! let mut encrypted = vec![];
//! let mut writer = encryptor.wrap_output(&mut encrypted)?;
//! writer.write_all(plaintext)?;
//! writer.finish()?;
//!
//! encrypted
//! };
//! # Ok(encrypted)
//! # }
//!
//! //... and decrypt the obtained ciphertext to the plaintext again.
//! # fn decrypt(key: age::x25519::Identity, encrypted: Vec<u8>) -> Result<Vec<u8>, age::DecryptError> {
//! let decrypted = {
//! let decryptor = match age::Decryptor::new(&encrypted[..])? {
//! age::Decryptor::Recipients(d) => d,
//! _ => unreachable!(),
//! };
//!
//! let mut decrypted = vec![];
//! let mut reader = decryptor.decrypt(iter::once(&key as &dyn age::Identity))?;
//! reader.read_to_end(&mut decrypted);
//!
//! decrypted
//! };
//! # Ok(decrypted)
//! # }
//! # let decrypted = decrypt(
//! # key,
//! # encrypt(pubkey, &plaintext[..]).map_err(|_| ())?
//! # ).map_err(|_| ())?;
//!
//! assert_eq!(decrypted, plaintext);
//! # Ok(())
//! # }
//!
//! # run_main().unwrap();
//! ```
//!
//! ## Passphrase-based encryption
//!
//! ```
//! use age::secrecy::Secret;
//! use std::io::{Read, Write};
//!
//! # fn run_main() -> Result<(), ()> {
//! let plaintext = b"Hello world!";
//! let passphrase = "this is not a good passphrase";
//!
//! // Encrypt the plaintext to a ciphertext using the passphrase...
//! # fn encrypt(passphrase: &str, plaintext: &[u8]) -> Result<Vec<u8>, age::EncryptError> {
//! let encrypted = {
//! let encryptor = age::Encryptor::with_user_passphrase(Secret::new(passphrase.to_owned()));
//!
//! let mut encrypted = vec![];
//! let mut writer = encryptor.wrap_output(&mut encrypted)?;
//! writer.write_all(plaintext)?;
//! writer.finish()?;
//!
//! encrypted
//! };
//! # Ok(encrypted)
//! # }
//!
//! //... and decrypt the ciphertext to the plaintext again using the same passphrase.
//! # fn decrypt(passphrase: &str, encrypted: Vec<u8>) -> Result<Vec<u8>, age::DecryptError> {
//! let decrypted = {
//! let decryptor = match age::Decryptor::new(&encrypted[..])? {
//! age::Decryptor::Passphrase(d) => d,
//! _ => unreachable!(),
//! };
//!
//! let mut decrypted = vec![];
//! let mut reader = decryptor.decrypt(&Secret::new(passphrase.to_owned()), None)?;
//! reader.read_to_end(&mut decrypted);
//!
//! decrypted
//! };
//! # Ok(decrypted)
//! # }
//! # let decrypted = decrypt(
//! # passphrase,
//! # encrypt(passphrase, &plaintext[..]).map_err(|_| ())?
//! # ).map_err(|_| ())?;
//!
//! assert_eq!(decrypted, plaintext);
//! # Ok(())
//! # }
//! # run_main().unwrap();
//! ```
#![cfg_attr(docsrs, feature(doc_cfg))]
#![forbid(unsafe_code)]
// Catch documentation errors caused by code changes.
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(missing_docs)]
// Re-export crates that are used in our public API.
pub use age_core::secrecy;
mod error;
mod format;
mod identity;
mod keys;
mod primitives;
mod protocol;
mod util;
pub use error::{DecryptError, EncryptError};
pub use identity::{IdentityFile, IdentityFileEntry};
pub use primitives::stream;
pub use protocol::{decryptor, Decryptor, Encryptor};
#[cfg(feature = "armor")]
pub use primitives::armor;
#[cfg(feature = "cli-common")]
#[cfg_attr(docsrs, doc(cfg(feature = "cli-common")))]
pub mod cli_common;
mod i18n;
pub use i18n::localizer;
//
// Identity types
//
pub mod encrypted;
mod scrypt;
pub mod x25519;
#[cfg(feature = "plugin")]
#[cfg_attr(docsrs, doc(cfg(feature = "plugin")))]
pub mod plugin;
#[cfg(feature = "ssh")]
#[cfg_attr(docsrs, doc(cfg(feature = "ssh")))]
pub mod ssh;
use age_core::{
format::{FileKey, Stanza},
secrecy::SecretString,
};
/// A private key or other value that can unwrap an opaque file key from a recipient
/// stanza.
pub trait Identity {
/// Attempts to unwrap the given stanza with this identity.
///
/// This method is part of the `Identity` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// identities to [`RecipientsDecryptor::decrypt`].
///
/// Returns:
/// - `Some(Ok(file_key))` on success.
/// - `Some(Err(e))` if a decryption error occurs.
/// - `None` if the recipient stanza does not match this key.
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
/// [`RecipientsDecryptor::decrypt`]: protocol::decryptor::RecipientsDecryptor::decrypt
fn unwrap_stanza(&self, stanza: &Stanza) -> Option<Result<FileKey, DecryptError>>;
/// Attempts to unwrap any of the given stanzas, which are assumed to come from the
/// same age file header, and therefore contain the same file key.
///
/// This method is part of the `Identity` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// identities to [`RecipientsDecryptor::decrypt`].
///
/// Returns:
/// - `Some(Ok(file_key))` on success.
/// - `Some(Err(e))` if a decryption error occurs.
/// - `None` if none of the recipient stanzas match this identity.
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
/// [`RecipientsDecryptor::decrypt`]: protocol::decryptor::RecipientsDecryptor::decrypt
fn unwrap_stanzas(&self, stanzas: &[Stanza]) -> Option<Result<FileKey, DecryptError>> {
stanzas.iter().find_map(|stanza| self.unwrap_stanza(stanza))
}
}
/// A public key or other value that can wrap an opaque file key to a recipient stanza.
///
/// Implementations of this trait might represent more than one recipient.
pub trait Recipient {
/// Wraps the given file key, returning stanzas to be placed in an age file header.
///
/// Implementations MUST NOT return more than one stanza per "actual recipient".
///
/// This method is part of the `Recipient` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// recipients to [`Encryptor::with_recipients`].
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
fn wrap_file_key(&self, file_key: &FileKey) -> Result<Vec<Stanza>, EncryptError>;
}
/// Callbacks that might be triggered during encryption or decryption.
///
/// Structs that implement this trait should be given directly to the individual
/// `Recipient` or `Identity` implementations that require them.
pub trait Callbacks: Clone + Send + Sync +'static {
/// Shows a message to the user.
///
/// This can be used to prompt the user to take some physical action, such as
/// inserting a hardware key.
fn display_message(&self, message: &str);
/// Requests that the user provides confirmation for some action.
///
/// This can be used to, for example, request that a hardware key the plugin wants to
/// try either be plugged in, or skipped.
///
/// - `message` is the request or call-to-action to be displayed to the user.
/// - `yes_string` and (optionally) `no_string` will be displayed on buttons or next
/// to selection options in the user's UI.
///
/// Returns:
/// - `Some(true)` if the user selected the option marked with `yes_string`.
/// - `Some(false)` if the user selected the option marked with `no_string` (or the
/// default negative confirmation label).
/// - `None` if the confirmation request could not be given to the user (for example,
/// if there is no UI for displaying messages).
fn confirm(&self, message: &str, yes_string: &str, no_string: Option<&str>) -> Option<bool>;
/// Requests non-private input from the user.
///
/// To request private inputs, use [`Callbacks::request_passphrase`].
fn request_public_string(&self, description: &str) -> Option<String>;
/// Requests a passphrase to decrypt a key.
fn request_passphrase(&self, description: &str) -> Option<SecretString>;
}
/// Helper for fuzzing the Header parser and serializer.
#[cfg(fuzzing)]
pub fn
|
(data: &[u8]) {
if let Ok(header) = format::Header::read(data) {
let mut buf = Vec::with_capacity(data.len());
header.write(&mut buf).expect("can write header");
assert_eq!(&buf[..], &data[..buf.len()]);
}
}
|
fuzz_header
|
identifier_name
|
lib.rs
|
//! *Library for encrypting and decrypting age files*
//!
//! This crate implements file encryption according to the [age-encryption.org/v1]
//! specification. It generates and consumes encrypted files that are compatible with the
//! [rage] CLI tool, as well as the reference [Go] implementation.
//!
//! The encryption and decryption APIs are provided by [`Encryptor`] and [`Decryptor`].
//! There are several ways to use these:
//! - For most cases (including programmatic usage), use [`Encryptor::with_recipients`]
//! with [`x25519::Recipient`], and [`Decryptor`] with [`x25519::Identity`].
//! - APIs are available for passphrase-based encryption and decryption. These should
//! only be used with passphrases that were provided by (or generated for) a human.
//! - For compatibility with existing SSH keys, enable the `ssh` feature flag, and use
//! [`ssh::Recipient`] and [`ssh::Identity`].
//!
//! Age-encrypted files are binary and non-malleable. To encode them as text, use the
//! wrapping readers and writers in the [`armor`] module, behind the `armor` feature flag.
//!
//! *Caution*: all crate versions prior to 1.0 are beta releases for **testing purposes
//! only**.
//!
//! [age-encryption.org/v1]: https://age-encryption.org/v1
//! [rage]: https://crates.io/crates/rage
//! [Go]: https://filippo.io/age
//!
//! # Examples
//!
//! ## Recipient-based encryption
//!
//! ```
//! use std::io::{Read, Write};
//! use std::iter;
//!
//! # fn run_main() -> Result<(), ()> {
//! let key = age::x25519::Identity::generate();
//! let pubkey = key.to_public();
//!
//! let plaintext = b"Hello world!";
//!
//! // Encrypt the plaintext to a ciphertext...
//! # fn encrypt(pubkey: age::x25519::Recipient, plaintext: &[u8]) -> Result<Vec<u8>, age::EncryptError> {
//! let encrypted = {
//! let encryptor = age::Encryptor::with_recipients(vec![Box::new(pubkey)])
//! .expect("we provided a recipient");
//!
//! let mut encrypted = vec![];
//! let mut writer = encryptor.wrap_output(&mut encrypted)?;
//! writer.write_all(plaintext)?;
//! writer.finish()?;
//!
//! encrypted
//! };
//! # Ok(encrypted)
//! # }
//!
//! //... and decrypt the obtained ciphertext to the plaintext again.
//! # fn decrypt(key: age::x25519::Identity, encrypted: Vec<u8>) -> Result<Vec<u8>, age::DecryptError> {
//! let decrypted = {
//! let decryptor = match age::Decryptor::new(&encrypted[..])? {
//! age::Decryptor::Recipients(d) => d,
//! _ => unreachable!(),
//! };
//!
//! let mut decrypted = vec![];
//! let mut reader = decryptor.decrypt(iter::once(&key as &dyn age::Identity))?;
//! reader.read_to_end(&mut decrypted);
//!
//! decrypted
//! };
//! # Ok(decrypted)
//! # }
//! # let decrypted = decrypt(
//! # key,
//! # encrypt(pubkey, &plaintext[..]).map_err(|_| ())?
//! # ).map_err(|_| ())?;
//!
//! assert_eq!(decrypted, plaintext);
//! # Ok(())
//! # }
//!
//! # run_main().unwrap();
//! ```
//!
//! ## Passphrase-based encryption
//!
//! ```
//! use age::secrecy::Secret;
//! use std::io::{Read, Write};
//!
//! # fn run_main() -> Result<(), ()> {
//! let plaintext = b"Hello world!";
//! let passphrase = "this is not a good passphrase";
//!
//! // Encrypt the plaintext to a ciphertext using the passphrase...
//! # fn encrypt(passphrase: &str, plaintext: &[u8]) -> Result<Vec<u8>, age::EncryptError> {
//! let encrypted = {
//! let encryptor = age::Encryptor::with_user_passphrase(Secret::new(passphrase.to_owned()));
//!
//! let mut encrypted = vec![];
//! let mut writer = encryptor.wrap_output(&mut encrypted)?;
//! writer.write_all(plaintext)?;
//! writer.finish()?;
//!
//! encrypted
//! };
//! # Ok(encrypted)
//! # }
//!
//! //... and decrypt the ciphertext to the plaintext again using the same passphrase.
//! # fn decrypt(passphrase: &str, encrypted: Vec<u8>) -> Result<Vec<u8>, age::DecryptError> {
//! let decrypted = {
//! let decryptor = match age::Decryptor::new(&encrypted[..])? {
//! age::Decryptor::Passphrase(d) => d,
//! _ => unreachable!(),
//! };
//!
//! let mut decrypted = vec![];
//! let mut reader = decryptor.decrypt(&Secret::new(passphrase.to_owned()), None)?;
//! reader.read_to_end(&mut decrypted);
//!
//! decrypted
//! };
//! # Ok(decrypted)
//! # }
//! # let decrypted = decrypt(
//! # passphrase,
//! # encrypt(passphrase, &plaintext[..]).map_err(|_| ())?
//! # ).map_err(|_| ())?;
//!
//! assert_eq!(decrypted, plaintext);
//! # Ok(())
//! # }
//! # run_main().unwrap();
//! ```
#![cfg_attr(docsrs, feature(doc_cfg))]
#![forbid(unsafe_code)]
// Catch documentation errors caused by code changes.
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(missing_docs)]
// Re-export crates that are used in our public API.
pub use age_core::secrecy;
mod error;
mod format;
mod identity;
mod keys;
mod primitives;
mod protocol;
mod util;
pub use error::{DecryptError, EncryptError};
pub use identity::{IdentityFile, IdentityFileEntry};
pub use primitives::stream;
pub use protocol::{decryptor, Decryptor, Encryptor};
#[cfg(feature = "armor")]
pub use primitives::armor;
#[cfg(feature = "cli-common")]
#[cfg_attr(docsrs, doc(cfg(feature = "cli-common")))]
pub mod cli_common;
mod i18n;
pub use i18n::localizer;
//
// Identity types
//
pub mod encrypted;
mod scrypt;
pub mod x25519;
#[cfg(feature = "plugin")]
#[cfg_attr(docsrs, doc(cfg(feature = "plugin")))]
pub mod plugin;
#[cfg(feature = "ssh")]
#[cfg_attr(docsrs, doc(cfg(feature = "ssh")))]
pub mod ssh;
use age_core::{
format::{FileKey, Stanza},
secrecy::SecretString,
};
/// A private key or other value that can unwrap an opaque file key from a recipient
/// stanza.
pub trait Identity {
/// Attempts to unwrap the given stanza with this identity.
///
/// This method is part of the `Identity` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// identities to [`RecipientsDecryptor::decrypt`].
///
/// Returns:
/// - `Some(Ok(file_key))` on success.
/// - `Some(Err(e))` if a decryption error occurs.
/// - `None` if the recipient stanza does not match this key.
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
/// [`RecipientsDecryptor::decrypt`]: protocol::decryptor::RecipientsDecryptor::decrypt
fn unwrap_stanza(&self, stanza: &Stanza) -> Option<Result<FileKey, DecryptError>>;
/// Attempts to unwrap any of the given stanzas, which are assumed to come from the
/// same age file header, and therefore contain the same file key.
///
/// This method is part of the `Identity` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// identities to [`RecipientsDecryptor::decrypt`].
///
/// Returns:
/// - `Some(Ok(file_key))` on success.
/// - `Some(Err(e))` if a decryption error occurs.
/// - `None` if none of the recipient stanzas match this identity.
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
/// [`RecipientsDecryptor::decrypt`]: protocol::decryptor::RecipientsDecryptor::decrypt
fn unwrap_stanzas(&self, stanzas: &[Stanza]) -> Option<Result<FileKey, DecryptError>> {
stanzas.iter().find_map(|stanza| self.unwrap_stanza(stanza))
}
}
/// A public key or other value that can wrap an opaque file key to a recipient stanza.
///
/// Implementations of this trait might represent more than one recipient.
pub trait Recipient {
/// Wraps the given file key, returning stanzas to be placed in an age file header.
///
/// Implementations MUST NOT return more than one stanza per "actual recipient".
///
/// This method is part of the `Recipient` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// recipients to [`Encryptor::with_recipients`].
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
fn wrap_file_key(&self, file_key: &FileKey) -> Result<Vec<Stanza>, EncryptError>;
}
/// Callbacks that might be triggered during encryption or decryption.
///
/// Structs that implement this trait should be given directly to the individual
/// `Recipient` or `Identity` implementations that require them.
pub trait Callbacks: Clone + Send + Sync +'static {
/// Shows a message to the user.
///
/// This can be used to prompt the user to take some physical action, such as
/// inserting a hardware key.
fn display_message(&self, message: &str);
/// Requests that the user provides confirmation for some action.
///
/// This can be used to, for example, request that a hardware key the plugin wants to
/// try either be plugged in, or skipped.
///
/// - `message` is the request or call-to-action to be displayed to the user.
/// - `yes_string` and (optionally) `no_string` will be displayed on buttons or next
/// to selection options in the user's UI.
///
/// Returns:
/// - `Some(true)` if the user selected the option marked with `yes_string`.
/// - `Some(false)` if the user selected the option marked with `no_string` (or the
/// default negative confirmation label).
/// - `None` if the confirmation request could not be given to the user (for example,
/// if there is no UI for displaying messages).
fn confirm(&self, message: &str, yes_string: &str, no_string: Option<&str>) -> Option<bool>;
/// Requests non-private input from the user.
///
/// To request private inputs, use [`Callbacks::request_passphrase`].
fn request_public_string(&self, description: &str) -> Option<String>;
/// Requests a passphrase to decrypt a key.
fn request_passphrase(&self, description: &str) -> Option<SecretString>;
}
/// Helper for fuzzing the Header parser and serializer.
#[cfg(fuzzing)]
pub fn fuzz_header(data: &[u8]) {
if let Ok(header) = format::Header::read(data)
|
}
|
{
let mut buf = Vec::with_capacity(data.len());
header.write(&mut buf).expect("can write header");
assert_eq!(&buf[..], &data[..buf.len()]);
}
|
conditional_block
|
lib.rs
|
//! *Library for encrypting and decrypting age files*
//!
//! This crate implements file encryption according to the [age-encryption.org/v1]
//! specification. It generates and consumes encrypted files that are compatible with the
//! [rage] CLI tool, as well as the reference [Go] implementation.
//!
//! The encryption and decryption APIs are provided by [`Encryptor`] and [`Decryptor`].
//! There are several ways to use these:
//! - For most cases (including programmatic usage), use [`Encryptor::with_recipients`]
//! with [`x25519::Recipient`], and [`Decryptor`] with [`x25519::Identity`].
//! - APIs are available for passphrase-based encryption and decryption. These should
//! only be used with passphrases that were provided by (or generated for) a human.
//! - For compatibility with existing SSH keys, enable the `ssh` feature flag, and use
//! [`ssh::Recipient`] and [`ssh::Identity`].
//!
//! Age-encrypted files are binary and non-malleable. To encode them as text, use the
//! wrapping readers and writers in the [`armor`] module, behind the `armor` feature flag.
//!
//! *Caution*: all crate versions prior to 1.0 are beta releases for **testing purposes
//! only**.
//!
//! [age-encryption.org/v1]: https://age-encryption.org/v1
//! [rage]: https://crates.io/crates/rage
//! [Go]: https://filippo.io/age
//!
//! # Examples
//!
//! ## Recipient-based encryption
//!
//! ```
//! use std::io::{Read, Write};
//! use std::iter;
//!
//! # fn run_main() -> Result<(), ()> {
//! let key = age::x25519::Identity::generate();
//! let pubkey = key.to_public();
//!
//! let plaintext = b"Hello world!";
//!
//! // Encrypt the plaintext to a ciphertext...
//! # fn encrypt(pubkey: age::x25519::Recipient, plaintext: &[u8]) -> Result<Vec<u8>, age::EncryptError> {
//! let encrypted = {
//! let encryptor = age::Encryptor::with_recipients(vec![Box::new(pubkey)])
//! .expect("we provided a recipient");
//!
//! let mut encrypted = vec![];
//! let mut writer = encryptor.wrap_output(&mut encrypted)?;
//! writer.write_all(plaintext)?;
//! writer.finish()?;
//!
//! encrypted
//! };
//! # Ok(encrypted)
//! # }
//!
//! //... and decrypt the obtained ciphertext to the plaintext again.
//! # fn decrypt(key: age::x25519::Identity, encrypted: Vec<u8>) -> Result<Vec<u8>, age::DecryptError> {
//! let decrypted = {
//! let decryptor = match age::Decryptor::new(&encrypted[..])? {
//! age::Decryptor::Recipients(d) => d,
//! _ => unreachable!(),
//! };
//!
//! let mut decrypted = vec![];
//! let mut reader = decryptor.decrypt(iter::once(&key as &dyn age::Identity))?;
//! reader.read_to_end(&mut decrypted);
//!
//! decrypted
//! };
//! # Ok(decrypted)
//! # }
//! # let decrypted = decrypt(
//! # key,
//! # encrypt(pubkey, &plaintext[..]).map_err(|_| ())?
//! # ).map_err(|_| ())?;
//!
//! assert_eq!(decrypted, plaintext);
//! # Ok(())
//! # }
//!
//! # run_main().unwrap();
//! ```
//!
//! ## Passphrase-based encryption
//!
//! ```
//! use age::secrecy::Secret;
//! use std::io::{Read, Write};
//!
//! # fn run_main() -> Result<(), ()> {
//! let plaintext = b"Hello world!";
//! let passphrase = "this is not a good passphrase";
//!
//! // Encrypt the plaintext to a ciphertext using the passphrase...
//! # fn encrypt(passphrase: &str, plaintext: &[u8]) -> Result<Vec<u8>, age::EncryptError> {
//! let encrypted = {
//! let encryptor = age::Encryptor::with_user_passphrase(Secret::new(passphrase.to_owned()));
//!
//! let mut encrypted = vec![];
//! let mut writer = encryptor.wrap_output(&mut encrypted)?;
//! writer.write_all(plaintext)?;
//! writer.finish()?;
//!
//! encrypted
//! };
//! # Ok(encrypted)
//! # }
//!
//! //... and decrypt the ciphertext to the plaintext again using the same passphrase.
//! # fn decrypt(passphrase: &str, encrypted: Vec<u8>) -> Result<Vec<u8>, age::DecryptError> {
//! let decrypted = {
//! let decryptor = match age::Decryptor::new(&encrypted[..])? {
//! age::Decryptor::Passphrase(d) => d,
//! _ => unreachable!(),
//! };
//!
//! let mut decrypted = vec![];
//! let mut reader = decryptor.decrypt(&Secret::new(passphrase.to_owned()), None)?;
//! reader.read_to_end(&mut decrypted);
//!
//! decrypted
//! };
//! # Ok(decrypted)
//! # }
//! # let decrypted = decrypt(
//! # passphrase,
//! # encrypt(passphrase, &plaintext[..]).map_err(|_| ())?
//! # ).map_err(|_| ())?;
//!
//! assert_eq!(decrypted, plaintext);
//! # Ok(())
//! # }
//! # run_main().unwrap();
//! ```
#![cfg_attr(docsrs, feature(doc_cfg))]
#![forbid(unsafe_code)]
// Catch documentation errors caused by code changes.
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(missing_docs)]
// Re-export crates that are used in our public API.
pub use age_core::secrecy;
mod error;
mod format;
mod identity;
mod keys;
mod primitives;
mod protocol;
mod util;
pub use error::{DecryptError, EncryptError};
pub use identity::{IdentityFile, IdentityFileEntry};
pub use primitives::stream;
pub use protocol::{decryptor, Decryptor, Encryptor};
#[cfg(feature = "armor")]
pub use primitives::armor;
#[cfg(feature = "cli-common")]
#[cfg_attr(docsrs, doc(cfg(feature = "cli-common")))]
pub mod cli_common;
mod i18n;
pub use i18n::localizer;
//
// Identity types
//
pub mod encrypted;
mod scrypt;
pub mod x25519;
#[cfg(feature = "plugin")]
#[cfg_attr(docsrs, doc(cfg(feature = "plugin")))]
pub mod plugin;
#[cfg(feature = "ssh")]
#[cfg_attr(docsrs, doc(cfg(feature = "ssh")))]
pub mod ssh;
use age_core::{
format::{FileKey, Stanza},
secrecy::SecretString,
};
/// A private key or other value that can unwrap an opaque file key from a recipient
/// stanza.
pub trait Identity {
/// Attempts to unwrap the given stanza with this identity.
///
/// This method is part of the `Identity` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// identities to [`RecipientsDecryptor::decrypt`].
///
/// Returns:
/// - `Some(Ok(file_key))` on success.
/// - `Some(Err(e))` if a decryption error occurs.
/// - `None` if the recipient stanza does not match this key.
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
/// [`RecipientsDecryptor::decrypt`]: protocol::decryptor::RecipientsDecryptor::decrypt
fn unwrap_stanza(&self, stanza: &Stanza) -> Option<Result<FileKey, DecryptError>>;
/// Attempts to unwrap any of the given stanzas, which are assumed to come from the
/// same age file header, and therefore contain the same file key.
///
/// This method is part of the `Identity` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// identities to [`RecipientsDecryptor::decrypt`].
///
/// Returns:
/// - `Some(Ok(file_key))` on success.
/// - `Some(Err(e))` if a decryption error occurs.
/// - `None` if none of the recipient stanzas match this identity.
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
/// [`RecipientsDecryptor::decrypt`]: protocol::decryptor::RecipientsDecryptor::decrypt
fn unwrap_stanzas(&self, stanzas: &[Stanza]) -> Option<Result<FileKey, DecryptError>> {
stanzas.iter().find_map(|stanza| self.unwrap_stanza(stanza))
}
}
/// A public key or other value that can wrap an opaque file key to a recipient stanza.
///
/// Implementations of this trait might represent more than one recipient.
pub trait Recipient {
/// Wraps the given file key, returning stanzas to be placed in an age file header.
///
/// Implementations MUST NOT return more than one stanza per "actual recipient".
///
/// This method is part of the `Recipient` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// recipients to [`Encryptor::with_recipients`].
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
fn wrap_file_key(&self, file_key: &FileKey) -> Result<Vec<Stanza>, EncryptError>;
}
/// Callbacks that might be triggered during encryption or decryption.
///
/// Structs that implement this trait should be given directly to the individual
/// `Recipient` or `Identity` implementations that require them.
pub trait Callbacks: Clone + Send + Sync +'static {
/// Shows a message to the user.
///
/// This can be used to prompt the user to take some physical action, such as
/// inserting a hardware key.
fn display_message(&self, message: &str);
/// Requests that the user provides confirmation for some action.
///
/// This can be used to, for example, request that a hardware key the plugin wants to
/// try either be plugged in, or skipped.
///
/// - `message` is the request or call-to-action to be displayed to the user.
/// - `yes_string` and (optionally) `no_string` will be displayed on buttons or next
/// to selection options in the user's UI.
///
/// Returns:
/// - `Some(true)` if the user selected the option marked with `yes_string`.
/// - `Some(false)` if the user selected the option marked with `no_string` (or the
/// default negative confirmation label).
/// - `None` if the confirmation request could not be given to the user (for example,
/// if there is no UI for displaying messages).
fn confirm(&self, message: &str, yes_string: &str, no_string: Option<&str>) -> Option<bool>;
/// Requests non-private input from the user.
///
/// To request private inputs, use [`Callbacks::request_passphrase`].
fn request_public_string(&self, description: &str) -> Option<String>;
/// Requests a passphrase to decrypt a key.
fn request_passphrase(&self, description: &str) -> Option<SecretString>;
}
/// Helper for fuzzing the Header parser and serializer.
#[cfg(fuzzing)]
pub fn fuzz_header(data: &[u8])
|
{
if let Ok(header) = format::Header::read(data) {
let mut buf = Vec::with_capacity(data.len());
header.write(&mut buf).expect("can write header");
assert_eq!(&buf[..], &data[..buf.len()]);
}
}
|
identifier_body
|
|
main.rs
|
::{debug, error, info, warn, Level};
use transfer_syntax::TransferSyntaxIndex;
use walkdir::WalkDir;
/// DICOM C-STORE SCU
#[derive(Debug, Parser)]
#[command(version)]
struct App {
/// socket address to Store SCP,
/// optionally with AE title
/// (example: "[email protected]:104")
addr: String,
/// the DICOM file(s) to store
#[arg(required = true)]
files: Vec<PathBuf>,
/// verbose mode
#[arg(short = 'v', long = "verbose")]
verbose: bool,
/// the C-STORE message ID
#[arg(short ='m', long = "message-id", default_value = "1")]
message_id: u16,
/// the calling Application Entity title
#[arg(long = "calling-ae-title", default_value = "STORE-SCU")]
calling_ae_title: String,
/// the called Application Entity title,
/// overrides AE title in address if present [default: ANY-SCP]
#[arg(long = "called-ae-title")]
called_ae_title: Option<String>,
/// the maximum PDU length accepted by the SCU
#[arg(long = "max-pdu-length", default_value = "16384")]
max_pdu_length: u32,
/// fail if not all DICOM files can be transferred
#[arg(long = "fail-first")]
fail_first: bool,
}
struct DicomFile {
/// File path
file: PathBuf,
/// Storage SOP Class UID
sop_class_uid: String,
/// Storage SOP Instance UID
sop_instance_uid: String,
/// File Transfer Syntax
file_transfer_syntax: String,
/// Transfer Syntax selected
ts_selected: Option<String>,
/// Presentation Context selected
pc_selected: Option<dicom_ul::pdu::PresentationContextResult>,
}
#[derive(Debug, Snafu)]
enum Error {
/// Could not initialize SCU
InitScu {
source: dicom_ul::association::client::Error,
},
/// Could not construct DICOM command
CreateCommand { source: dicom_object::WriteError },
#[snafu(whatever, display("{}", message))]
Other {
message: String,
#[snafu(source(from(Box<dyn std::error::Error +'static>, Some)))]
source: Option<Box<dyn std::error::Error +'static>>,
},
}
fn main() {
run().unwrap_or_else(|e| {
error!("{}", Report::from_error(e));
std::process::exit(-2);
});
}
fn run() -> Result<(), Error> {
let App {
addr,
files,
verbose,
message_id,
calling_ae_title,
called_ae_title,
max_pdu_length,
fail_first,
} = App::parse();
tracing::subscriber::set_global_default(
tracing_subscriber::FmtSubscriber::builder()
.with_max_level(if verbose { Level::DEBUG } else { Level::INFO })
.finish(),
)
.whatever_context("Could not set up global logging subscriber")
.unwrap_or_else(|e: Whatever| {
eprintln!("[ERROR] {}", Report::from_error(e));
});
let mut checked_files: Vec<PathBuf> = vec![];
let mut dicom_files: Vec<DicomFile> = vec![];
let mut presentation_contexts = HashSet::new();
for file in files {
if file.is_dir() {
for file in WalkDir::new(file.as_path())
.into_iter()
.filter_map(Result::ok)
.filter(|f|!f.file_type().is_dir())
{
checked_files.push(file.into_path());
}
} else {
checked_files.push(file);
}
}
for file in checked_files {
if verbose {
info!("Opening file '{}'...", file.display());
}
match check_file(&file) {
Ok(dicom_file) => {
presentation_contexts.insert((
dicom_file.sop_class_uid.to_string(),
dicom_file.file_transfer_syntax.clone(),
));
dicom_files.push(dicom_file);
}
Err(_) => {
warn!("Could not open file {} as DICOM", file.display());
}
}
}
if dicom_files.is_empty() {
eprintln!("No supported files to transfer");
std::process::exit(-1);
}
if verbose {
info!("Establishing association with '{}'...", &addr);
}
let mut scu_init = ClientAssociationOptions::new()
.calling_ae_title(calling_ae_title)
.max_pdu_length(max_pdu_length);
for (storage_sop_class_uid, transfer_syntax) in &presentation_contexts {
scu_init = scu_init.with_presentation_context(storage_sop_class_uid, vec![transfer_syntax]);
}
if let Some(called_ae_title) = called_ae_title {
scu_init = scu_init.called_ae_title(called_ae_title);
}
let mut scu = scu_init.establish_with(&addr).context(InitScuSnafu)?;
if verbose {
info!("Association established");
}
for file in &mut dicom_files {
// TODO(#106) transfer syntax conversion is currently not supported
let r: Result<_, Error> = check_presentation_contexts(file, scu.presentation_contexts())
.whatever_context::<_, _>("Could not choose a transfer syntax");
match r {
Ok((pc, ts)) => {
file.pc_selected = Some(pc);
file.ts_selected = Some(ts);
}
Err(e) => {
error!("{}", Report::from_error(e));
if fail_first {
let _ = scu.abort();
std::process::exit(-2);
}
}
}
}
let progress_bar;
if!verbose {
progress_bar = Some(ProgressBar::new(dicom_files.len() as u64));
if let Some(pb) = progress_bar.as_ref() {
pb.set_style(
ProgressStyle::default_bar()
.template("[{elapsed_precise}] {bar:40} {pos}/{len} {wide_msg}")
.expect("Invalid progress bar template"),
);
pb.enable_steady_tick(Duration::new(0, 480_000_000));
};
} else {
progress_bar = None;
}
for file in dicom_files {
if let (Some(pc_selected), Some(ts_uid_selected)) = (file.pc_selected, file.ts_selected) {
if let Some(pb) = &progress_bar {
pb.set_message(file.sop_instance_uid.clone());
}
let cmd = store_req_command(&file.sop_class_uid, &file.sop_instance_uid, message_id);
let mut cmd_data = Vec::with_capacity(128);
cmd.write_dataset_with_ts(
&mut cmd_data,
&dicom_transfer_syntax_registry::entries::IMPLICIT_VR_LITTLE_ENDIAN.erased(),
)
.context(CreateCommandSnafu)?;
let mut object_data = Vec::with_capacity(2048);
let dicom_file =
open_file(&file.file).whatever_context("Could not open listed DICOM file")?;
let ts_selected = TransferSyntaxRegistry
.get(&ts_uid_selected)
.whatever_context("Unsupported file transfer syntax")?;
dicom_file
.write_dataset_with_ts(&mut object_data, ts_selected)
.whatever_context("Could not write object dataset")?;
let nbytes = cmd_data.len() + object_data.len();
if verbose {
info!(
"Sending file {} (~ {} kB), uid={}, sop={}, ts={}",
file.file.display(),
nbytes / 1_000,
&file.sop_instance_uid,
&file.sop_class_uid,
ts_uid_selected,
);
}
if nbytes < scu.acceptor_max_pdu_length().saturating_sub(100) as usize {
let pdu = Pdu::PData {
data: vec![
PDataValue {
presentation_context_id: pc_selected.id,
value_type: PDataValueType::Command,
is_last: true,
data: cmd_data,
},
PDataValue {
presentation_context_id: pc_selected.id,
value_type: PDataValueType::Data,
is_last: true,
data: object_data,
},
],
};
scu.send(&pdu)
.whatever_context("Failed to send C-STORE-RQ")?;
} else {
let pdu = Pdu::PData {
data: vec![PDataValue {
presentation_context_id: pc_selected.id,
value_type: PDataValueType::Command,
is_last: true,
data: cmd_data,
}],
};
scu.send(&pdu)
.whatever_context("Failed to send C-STORE-RQ command")?;
{
let mut pdata = scu.send_pdata(pc_selected.id);
pdata
.write_all(&object_data)
.whatever_context("Failed to send C-STORE-RQ P-Data")?;
}
}
if verbose {
debug!("Awaiting response...");
}
let rsp_pdu = scu
.receive()
.whatever_context("Failed to receive C-STORE-RSP")?;
match rsp_pdu {
Pdu::PData { data } => {
let data_value = &data[0];
let cmd_obj = InMemDicomObject::read_dataset_with_ts(
&data_value.data[..],
&dicom_transfer_syntax_registry::entries::IMPLICIT_VR_LITTLE_ENDIAN
.erased(),
)
.whatever_context("Could not read response from SCP")?;
if verbose {
debug!("Full response: {:?}", cmd_obj);
}
let status = cmd_obj
.element(tags::STATUS)
.whatever_context("Could not find status code in response")?
.to_int::<u16>()
.whatever_context("Status code in response is not a valid integer")?;
let storage_sop_instance_uid = file
.sop_instance_uid
.trim_end_matches(|c: char| c.is_whitespace() || c == '\0');
match status {
// Success
0 => {
if verbose {
info!("Successfully stored instance {}", storage_sop_instance_uid);
}
}
// Warning
1 | 0x0107 | 0x0116 | 0xB000..=0xBFFF => {
warn!(
"Possible issue storing instance `{}` (status code {:04X}H)",
storage_sop_instance_uid, status
);
}
0xFF00 | 0xFF01 => {
warn!(
"Possible issue storing instance `{}`: status is pending (status code {:04X}H)",
storage_sop_instance_uid, status
);
}
0xFE00 => {
error!(
"Could not store instance `{}`: operation cancelled",
storage_sop_instance_uid
);
if fail_first {
let _ = scu.abort();
std::process::exit(-2);
}
}
_ => {
error!(
"Failed to store instance `{}` (status code {:04X}H)",
storage_sop_instance_uid, status
);
if fail_first {
let _ = scu.abort();
std::process::exit(-2);
}
}
}
}
pdu @ Pdu::Unknown {.. }
| pdu @ Pdu::AssociationRQ {.. }
| pdu @ Pdu::AssociationAC {.. }
| pdu @ Pdu::AssociationRJ {.. }
| pdu @ Pdu::ReleaseRQ
| pdu @ Pdu::ReleaseRP
| pdu @ Pdu::AbortRQ {.. } => {
error!("Unexpected SCP response: {:?}", pdu);
let _ = scu.abort();
std::process::exit(-2);
}
}
}
if let Some(pb) = progress_bar.as_ref() {
pb.inc(1)
};
}
if let Some(pb) = progress_bar {
pb.finish_with_message("done")
};
scu.release()
.whatever_context("Failed to release SCU association")?;
Ok(())
}
fn store_req_command(
storage_sop_class_uid: &str,
storage_sop_instance_uid: &str,
message_id: u16,
) -> InMemDicomObject<StandardDataDictionary> {
InMemDicomObject::command_from_element_iter([
// SOP Class UID
DataElement::new(
tags::AFFECTED_SOP_CLASS_UID,
VR::UI,
dicom_value!(Str, storage_sop_class_uid),
),
// command field
DataElement::new(tags::COMMAND_FIELD, VR::US, dicom_value!(U16, [0x0001])),
// message ID
DataElement::new(tags::MESSAGE_ID, VR::US, dicom_value!(U16, [message_id])),
//priority
DataElement::new(tags::PRIORITY, VR::US, dicom_value!(U16, [0x0000])),
// data set type
DataElement::new(
tags::COMMAND_DATA_SET_TYPE,
VR::US,
dicom_value!(U16, [0x0000]),
),
// affected SOP Instance UID
DataElement::new(
tags::AFFECTED_SOP_INSTANCE_UID,
VR::UI,
dicom_value!(Str, storage_sop_instance_uid),
),
])
}
fn check_file(file: &Path) -> Result<DicomFile, Error> {
// Ignore DICOMDIR files until better support is added
let _ = (file.file_name()!= Some(OsStr::new("DICOMDIR")))
.then_some(false)
.whatever_context("DICOMDIR file not supported")?;
let dicom_file = dicom_object::OpenFileOptions::new()
.read_until(Tag(0x0001, 0x000))
.open_file(file)
.with_whatever_context(|_| format!("Could not open DICOM file {}", file.display()))?;
let meta = dicom_file.meta();
let storage_sop_class_uid = &meta.media_storage_sop_class_uid;
let storage_sop_instance_uid = &meta.media_storage_sop_instance_uid;
let transfer_syntax_uid = &meta.transfer_syntax.trim_end_matches('\0');
let ts = TransferSyntaxRegistry
.get(transfer_syntax_uid)
.whatever_context("Unsupported file transfer syntax")?;
Ok(DicomFile {
file: file.to_path_buf(),
sop_class_uid: storage_sop_class_uid.to_string(),
sop_instance_uid: storage_sop_instance_uid.to_string(),
file_transfer_syntax: String::from(ts.uid()),
ts_selected: None,
pc_selected: None,
})
}
fn
|
(
file: &DicomFile,
pcs: &[dicom_ul::pdu::PresentationContextResult],
) -> Result<(dicom_ul::pdu::PresentationContextResult, String), Error> {
let file_ts = TransferSyntaxRegistry
.get(&file.file_transfer_syntax)
.whatever_context("Unsupported file transfer syntax")?;
// TODO(#106) transfer syntax conversion is currently not supported
let pc = pcs
.iter()
|
check_presentation_contexts
|
identifier_name
|
main.rs
|
::{debug, error, info, warn, Level};
use transfer_syntax::TransferSyntaxIndex;
use walkdir::WalkDir;
/// DICOM C-STORE SCU
#[derive(Debug, Parser)]
#[command(version)]
struct App {
/// socket address to Store SCP,
/// optionally with AE title
/// (example: "[email protected]:104")
addr: String,
/// the DICOM file(s) to store
#[arg(required = true)]
files: Vec<PathBuf>,
/// verbose mode
#[arg(short = 'v', long = "verbose")]
verbose: bool,
/// the C-STORE message ID
#[arg(short ='m', long = "message-id", default_value = "1")]
message_id: u16,
/// the calling Application Entity title
#[arg(long = "calling-ae-title", default_value = "STORE-SCU")]
calling_ae_title: String,
/// the called Application Entity title,
/// overrides AE title in address if present [default: ANY-SCP]
#[arg(long = "called-ae-title")]
called_ae_title: Option<String>,
/// the maximum PDU length accepted by the SCU
#[arg(long = "max-pdu-length", default_value = "16384")]
max_pdu_length: u32,
/// fail if not all DICOM files can be transferred
#[arg(long = "fail-first")]
fail_first: bool,
}
struct DicomFile {
/// File path
file: PathBuf,
/// Storage SOP Class UID
sop_class_uid: String,
/// Storage SOP Instance UID
sop_instance_uid: String,
/// File Transfer Syntax
file_transfer_syntax: String,
/// Transfer Syntax selected
ts_selected: Option<String>,
/// Presentation Context selected
pc_selected: Option<dicom_ul::pdu::PresentationContextResult>,
}
#[derive(Debug, Snafu)]
enum Error {
/// Could not initialize SCU
InitScu {
source: dicom_ul::association::client::Error,
},
/// Could not construct DICOM command
CreateCommand { source: dicom_object::WriteError },
#[snafu(whatever, display("{}", message))]
Other {
message: String,
#[snafu(source(from(Box<dyn std::error::Error +'static>, Some)))]
source: Option<Box<dyn std::error::Error +'static>>,
},
}
fn main() {
run().unwrap_or_else(|e| {
error!("{}", Report::from_error(e));
std::process::exit(-2);
});
}
fn run() -> Result<(), Error> {
let App {
addr,
files,
verbose,
message_id,
calling_ae_title,
called_ae_title,
max_pdu_length,
fail_first,
} = App::parse();
tracing::subscriber::set_global_default(
tracing_subscriber::FmtSubscriber::builder()
.with_max_level(if verbose { Level::DEBUG } else { Level::INFO })
.finish(),
)
.whatever_context("Could not set up global logging subscriber")
.unwrap_or_else(|e: Whatever| {
eprintln!("[ERROR] {}", Report::from_error(e));
});
let mut checked_files: Vec<PathBuf> = vec![];
let mut dicom_files: Vec<DicomFile> = vec![];
let mut presentation_contexts = HashSet::new();
for file in files {
if file.is_dir() {
for file in WalkDir::new(file.as_path())
.into_iter()
.filter_map(Result::ok)
.filter(|f|!f.file_type().is_dir())
{
checked_files.push(file.into_path());
}
} else {
checked_files.push(file);
}
}
for file in checked_files {
if verbose {
info!("Opening file '{}'...", file.display());
}
match check_file(&file) {
Ok(dicom_file) => {
presentation_contexts.insert((
dicom_file.sop_class_uid.to_string(),
dicom_file.file_transfer_syntax.clone(),
));
dicom_files.push(dicom_file);
}
Err(_) => {
warn!("Could not open file {} as DICOM", file.display());
}
}
}
if dicom_files.is_empty() {
eprintln!("No supported files to transfer");
std::process::exit(-1);
}
if verbose {
info!("Establishing association with '{}'...", &addr);
}
let mut scu_init = ClientAssociationOptions::new()
.calling_ae_title(calling_ae_title)
.max_pdu_length(max_pdu_length);
for (storage_sop_class_uid, transfer_syntax) in &presentation_contexts {
scu_init = scu_init.with_presentation_context(storage_sop_class_uid, vec![transfer_syntax]);
}
if let Some(called_ae_title) = called_ae_title {
scu_init = scu_init.called_ae_title(called_ae_title);
}
let mut scu = scu_init.establish_with(&addr).context(InitScuSnafu)?;
if verbose {
info!("Association established");
}
for file in &mut dicom_files {
// TODO(#106) transfer syntax conversion is currently not supported
let r: Result<_, Error> = check_presentation_contexts(file, scu.presentation_contexts())
.whatever_context::<_, _>("Could not choose a transfer syntax");
match r {
Ok((pc, ts)) => {
file.pc_selected = Some(pc);
file.ts_selected = Some(ts);
}
Err(e) => {
error!("{}", Report::from_error(e));
if fail_first {
let _ = scu.abort();
std::process::exit(-2);
}
}
}
}
let progress_bar;
if!verbose {
progress_bar = Some(ProgressBar::new(dicom_files.len() as u64));
if let Some(pb) = progress_bar.as_ref() {
pb.set_style(
ProgressStyle::default_bar()
.template("[{elapsed_precise}] {bar:40} {pos}/{len} {wide_msg}")
.expect("Invalid progress bar template"),
);
pb.enable_steady_tick(Duration::new(0, 480_000_000));
};
} else {
progress_bar = None;
}
for file in dicom_files {
if let (Some(pc_selected), Some(ts_uid_selected)) = (file.pc_selected, file.ts_selected) {
if let Some(pb) = &progress_bar {
pb.set_message(file.sop_instance_uid.clone());
}
let cmd = store_req_command(&file.sop_class_uid, &file.sop_instance_uid, message_id);
let mut cmd_data = Vec::with_capacity(128);
cmd.write_dataset_with_ts(
&mut cmd_data,
&dicom_transfer_syntax_registry::entries::IMPLICIT_VR_LITTLE_ENDIAN.erased(),
)
.context(CreateCommandSnafu)?;
let mut object_data = Vec::with_capacity(2048);
let dicom_file =
open_file(&file.file).whatever_context("Could not open listed DICOM file")?;
let ts_selected = TransferSyntaxRegistry
.get(&ts_uid_selected)
.whatever_context("Unsupported file transfer syntax")?;
dicom_file
.write_dataset_with_ts(&mut object_data, ts_selected)
.whatever_context("Could not write object dataset")?;
let nbytes = cmd_data.len() + object_data.len();
if verbose {
info!(
"Sending file {} (~ {} kB), uid={}, sop={}, ts={}",
file.file.display(),
nbytes / 1_000,
&file.sop_instance_uid,
&file.sop_class_uid,
ts_uid_selected,
);
}
if nbytes < scu.acceptor_max_pdu_length().saturating_sub(100) as usize {
let pdu = Pdu::PData {
data: vec![
PDataValue {
presentation_context_id: pc_selected.id,
value_type: PDataValueType::Command,
is_last: true,
data: cmd_data,
},
PDataValue {
presentation_context_id: pc_selected.id,
value_type: PDataValueType::Data,
is_last: true,
data: object_data,
},
],
};
scu.send(&pdu)
.whatever_context("Failed to send C-STORE-RQ")?;
} else {
let pdu = Pdu::PData {
data: vec![PDataValue {
presentation_context_id: pc_selected.id,
value_type: PDataValueType::Command,
is_last: true,
data: cmd_data,
}],
};
scu.send(&pdu)
.whatever_context("Failed to send C-STORE-RQ command")?;
{
let mut pdata = scu.send_pdata(pc_selected.id);
pdata
.write_all(&object_data)
.whatever_context("Failed to send C-STORE-RQ P-Data")?;
}
}
if verbose {
debug!("Awaiting response...");
}
let rsp_pdu = scu
.receive()
.whatever_context("Failed to receive C-STORE-RSP")?;
match rsp_pdu {
Pdu::PData { data } => {
let data_value = &data[0];
let cmd_obj = InMemDicomObject::read_dataset_with_ts(
&data_value.data[..],
&dicom_transfer_syntax_registry::entries::IMPLICIT_VR_LITTLE_ENDIAN
.erased(),
)
.whatever_context("Could not read response from SCP")?;
if verbose {
debug!("Full response: {:?}", cmd_obj);
}
let status = cmd_obj
.element(tags::STATUS)
.whatever_context("Could not find status code in response")?
.to_int::<u16>()
.whatever_context("Status code in response is not a valid integer")?;
let storage_sop_instance_uid = file
.sop_instance_uid
.trim_end_matches(|c: char| c.is_whitespace() || c == '\0');
match status {
// Success
0 => {
if verbose {
info!("Successfully stored instance {}", storage_sop_instance_uid);
}
}
// Warning
1 | 0x0107 | 0x0116 | 0xB000..=0xBFFF => {
warn!(
"Possible issue storing instance `{}` (status code {:04X}H)",
storage_sop_instance_uid, status
);
}
0xFF00 | 0xFF01 => {
warn!(
"Possible issue storing instance `{}`: status is pending (status code {:04X}H)",
storage_sop_instance_uid, status
);
}
0xFE00 => {
error!(
"Could not store instance `{}`: operation cancelled",
storage_sop_instance_uid
);
if fail_first {
let _ = scu.abort();
std::process::exit(-2);
}
}
_ => {
error!(
"Failed to store instance `{}` (status code {:04X}H)",
storage_sop_instance_uid, status
);
if fail_first {
let _ = scu.abort();
std::process::exit(-2);
}
}
}
}
pdu @ Pdu::Unknown {.. }
| pdu @ Pdu::AssociationRQ {.. }
| pdu @ Pdu::AssociationAC {.. }
| pdu @ Pdu::AssociationRJ {.. }
| pdu @ Pdu::ReleaseRQ
| pdu @ Pdu::ReleaseRP
| pdu @ Pdu::AbortRQ {.. } =>
|
}
}
if let Some(pb) = progress_bar.as_ref() {
pb.inc(1)
};
}
if let Some(pb) = progress_bar {
pb.finish_with_message("done")
};
scu.release()
.whatever_context("Failed to release SCU association")?;
Ok(())
}
fn store_req_command(
storage_sop_class_uid: &str,
storage_sop_instance_uid: &str,
message_id: u16,
) -> InMemDicomObject<StandardDataDictionary> {
InMemDicomObject::command_from_element_iter([
// SOP Class UID
DataElement::new(
tags::AFFECTED_SOP_CLASS_UID,
VR::UI,
dicom_value!(Str, storage_sop_class_uid),
),
// command field
DataElement::new(tags::COMMAND_FIELD, VR::US, dicom_value!(U16, [0x0001])),
// message ID
DataElement::new(tags::MESSAGE_ID, VR::US, dicom_value!(U16, [message_id])),
//priority
DataElement::new(tags::PRIORITY, VR::US, dicom_value!(U16, [0x0000])),
// data set type
DataElement::new(
tags::COMMAND_DATA_SET_TYPE,
VR::US,
dicom_value!(U16, [0x0000]),
),
// affected SOP Instance UID
DataElement::new(
tags::AFFECTED_SOP_INSTANCE_UID,
VR::UI,
dicom_value!(Str, storage_sop_instance_uid),
),
])
}
fn check_file(file: &Path) -> Result<DicomFile, Error> {
// Ignore DICOMDIR files until better support is added
let _ = (file.file_name()!= Some(OsStr::new("DICOMDIR")))
.then_some(false)
.whatever_context("DICOMDIR file not supported")?;
let dicom_file = dicom_object::OpenFileOptions::new()
.read_until(Tag(0x0001, 0x000))
.open_file(file)
.with_whatever_context(|_| format!("Could not open DICOM file {}", file.display()))?;
let meta = dicom_file.meta();
let storage_sop_class_uid = &meta.media_storage_sop_class_uid;
let storage_sop_instance_uid = &meta.media_storage_sop_instance_uid;
let transfer_syntax_uid = &meta.transfer_syntax.trim_end_matches('\0');
let ts = TransferSyntaxRegistry
.get(transfer_syntax_uid)
.whatever_context("Unsupported file transfer syntax")?;
Ok(DicomFile {
file: file.to_path_buf(),
sop_class_uid: storage_sop_class_uid.to_string(),
sop_instance_uid: storage_sop_instance_uid.to_string(),
file_transfer_syntax: String::from(ts.uid()),
ts_selected: None,
pc_selected: None,
})
}
fn check_presentation_contexts(
file: &DicomFile,
pcs: &[dicom_ul::pdu::PresentationContextResult],
) -> Result<(dicom_ul::pdu::PresentationContextResult, String), Error> {
let file_ts = TransferSyntaxRegistry
.get(&file.file_transfer_syntax)
.whatever_context("Unsupported file transfer syntax")?;
// TODO(#106) transfer syntax conversion is currently not supported
let pc = pcs
.iter()
|
{
error!("Unexpected SCP response: {:?}", pdu);
let _ = scu.abort();
std::process::exit(-2);
}
|
conditional_block
|
main.rs
|
use tracing::{debug, error, info, warn, Level};
use transfer_syntax::TransferSyntaxIndex;
use walkdir::WalkDir;
/// DICOM C-STORE SCU
#[derive(Debug, Parser)]
#[command(version)]
struct App {
/// socket address to Store SCP,
/// optionally with AE title
/// (example: "[email protected]:104")
addr: String,
/// the DICOM file(s) to store
#[arg(required = true)]
files: Vec<PathBuf>,
/// verbose mode
#[arg(short = 'v', long = "verbose")]
verbose: bool,
/// the C-STORE message ID
#[arg(short ='m', long = "message-id", default_value = "1")]
message_id: u16,
/// the calling Application Entity title
#[arg(long = "calling-ae-title", default_value = "STORE-SCU")]
calling_ae_title: String,
/// the called Application Entity title,
/// overrides AE title in address if present [default: ANY-SCP]
#[arg(long = "called-ae-title")]
called_ae_title: Option<String>,
/// the maximum PDU length accepted by the SCU
#[arg(long = "max-pdu-length", default_value = "16384")]
max_pdu_length: u32,
/// fail if not all DICOM files can be transferred
#[arg(long = "fail-first")]
fail_first: bool,
}
struct DicomFile {
/// File path
file: PathBuf,
/// Storage SOP Class UID
sop_class_uid: String,
/// Storage SOP Instance UID
sop_instance_uid: String,
/// File Transfer Syntax
file_transfer_syntax: String,
/// Transfer Syntax selected
ts_selected: Option<String>,
/// Presentation Context selected
pc_selected: Option<dicom_ul::pdu::PresentationContextResult>,
}
#[derive(Debug, Snafu)]
enum Error {
/// Could not initialize SCU
InitScu {
source: dicom_ul::association::client::Error,
},
/// Could not construct DICOM command
CreateCommand { source: dicom_object::WriteError },
#[snafu(whatever, display("{}", message))]
Other {
message: String,
#[snafu(source(from(Box<dyn std::error::Error +'static>, Some)))]
source: Option<Box<dyn std::error::Error +'static>>,
},
}
fn main() {
run().unwrap_or_else(|e| {
error!("{}", Report::from_error(e));
std::process::exit(-2);
});
}
fn run() -> Result<(), Error> {
let App {
addr,
files,
verbose,
message_id,
calling_ae_title,
called_ae_title,
max_pdu_length,
fail_first,
} = App::parse();
tracing::subscriber::set_global_default(
tracing_subscriber::FmtSubscriber::builder()
.with_max_level(if verbose { Level::DEBUG } else { Level::INFO })
.finish(),
)
.whatever_context("Could not set up global logging subscriber")
.unwrap_or_else(|e: Whatever| {
eprintln!("[ERROR] {}", Report::from_error(e));
});
let mut checked_files: Vec<PathBuf> = vec![];
let mut dicom_files: Vec<DicomFile> = vec![];
let mut presentation_contexts = HashSet::new();
for file in files {
if file.is_dir() {
for file in WalkDir::new(file.as_path())
.into_iter()
.filter_map(Result::ok)
.filter(|f|!f.file_type().is_dir())
{
checked_files.push(file.into_path());
}
} else {
checked_files.push(file);
}
}
for file in checked_files {
if verbose {
info!("Opening file '{}'...", file.display());
}
match check_file(&file) {
Ok(dicom_file) => {
presentation_contexts.insert((
dicom_file.sop_class_uid.to_string(),
dicom_file.file_transfer_syntax.clone(),
));
dicom_files.push(dicom_file);
}
Err(_) => {
warn!("Could not open file {} as DICOM", file.display());
}
}
}
if dicom_files.is_empty() {
eprintln!("No supported files to transfer");
std::process::exit(-1);
}
if verbose {
info!("Establishing association with '{}'...", &addr);
}
let mut scu_init = ClientAssociationOptions::new()
.calling_ae_title(calling_ae_title)
.max_pdu_length(max_pdu_length);
for (storage_sop_class_uid, transfer_syntax) in &presentation_contexts {
scu_init = scu_init.with_presentation_context(storage_sop_class_uid, vec![transfer_syntax]);
}
if let Some(called_ae_title) = called_ae_title {
scu_init = scu_init.called_ae_title(called_ae_title);
}
let mut scu = scu_init.establish_with(&addr).context(InitScuSnafu)?;
if verbose {
info!("Association established");
}
for file in &mut dicom_files {
// TODO(#106) transfer syntax conversion is currently not supported
let r: Result<_, Error> = check_presentation_contexts(file, scu.presentation_contexts())
.whatever_context::<_, _>("Could not choose a transfer syntax");
match r {
Ok((pc, ts)) => {
file.pc_selected = Some(pc);
file.ts_selected = Some(ts);
}
Err(e) => {
error!("{}", Report::from_error(e));
if fail_first {
let _ = scu.abort();
std::process::exit(-2);
}
}
}
}
let progress_bar;
if!verbose {
progress_bar = Some(ProgressBar::new(dicom_files.len() as u64));
if let Some(pb) = progress_bar.as_ref() {
pb.set_style(
ProgressStyle::default_bar()
.template("[{elapsed_precise}] {bar:40} {pos}/{len} {wide_msg}")
.expect("Invalid progress bar template"),
);
pb.enable_steady_tick(Duration::new(0, 480_000_000));
};
} else {
progress_bar = None;
}
for file in dicom_files {
if let (Some(pc_selected), Some(ts_uid_selected)) = (file.pc_selected, file.ts_selected) {
if let Some(pb) = &progress_bar {
pb.set_message(file.sop_instance_uid.clone());
}
let cmd = store_req_command(&file.sop_class_uid, &file.sop_instance_uid, message_id);
let mut cmd_data = Vec::with_capacity(128);
cmd.write_dataset_with_ts(
&mut cmd_data,
&dicom_transfer_syntax_registry::entries::IMPLICIT_VR_LITTLE_ENDIAN.erased(),
)
.context(CreateCommandSnafu)?;
let mut object_data = Vec::with_capacity(2048);
let dicom_file =
open_file(&file.file).whatever_context("Could not open listed DICOM file")?;
let ts_selected = TransferSyntaxRegistry
.get(&ts_uid_selected)
|
let nbytes = cmd_data.len() + object_data.len();
if verbose {
info!(
"Sending file {} (~ {} kB), uid={}, sop={}, ts={}",
file.file.display(),
nbytes / 1_000,
&file.sop_instance_uid,
&file.sop_class_uid,
ts_uid_selected,
);
}
if nbytes < scu.acceptor_max_pdu_length().saturating_sub(100) as usize {
let pdu = Pdu::PData {
data: vec![
PDataValue {
presentation_context_id: pc_selected.id,
value_type: PDataValueType::Command,
is_last: true,
data: cmd_data,
},
PDataValue {
presentation_context_id: pc_selected.id,
value_type: PDataValueType::Data,
is_last: true,
data: object_data,
},
],
};
scu.send(&pdu)
.whatever_context("Failed to send C-STORE-RQ")?;
} else {
let pdu = Pdu::PData {
data: vec![PDataValue {
presentation_context_id: pc_selected.id,
value_type: PDataValueType::Command,
is_last: true,
data: cmd_data,
}],
};
scu.send(&pdu)
.whatever_context("Failed to send C-STORE-RQ command")?;
{
let mut pdata = scu.send_pdata(pc_selected.id);
pdata
.write_all(&object_data)
.whatever_context("Failed to send C-STORE-RQ P-Data")?;
}
}
if verbose {
debug!("Awaiting response...");
}
let rsp_pdu = scu
.receive()
.whatever_context("Failed to receive C-STORE-RSP")?;
match rsp_pdu {
Pdu::PData { data } => {
let data_value = &data[0];
let cmd_obj = InMemDicomObject::read_dataset_with_ts(
&data_value.data[..],
&dicom_transfer_syntax_registry::entries::IMPLICIT_VR_LITTLE_ENDIAN
.erased(),
)
.whatever_context("Could not read response from SCP")?;
if verbose {
debug!("Full response: {:?}", cmd_obj);
}
let status = cmd_obj
.element(tags::STATUS)
.whatever_context("Could not find status code in response")?
.to_int::<u16>()
.whatever_context("Status code in response is not a valid integer")?;
let storage_sop_instance_uid = file
.sop_instance_uid
.trim_end_matches(|c: char| c.is_whitespace() || c == '\0');
match status {
// Success
0 => {
if verbose {
info!("Successfully stored instance {}", storage_sop_instance_uid);
}
}
// Warning
1 | 0x0107 | 0x0116 | 0xB000..=0xBFFF => {
warn!(
"Possible issue storing instance `{}` (status code {:04X}H)",
storage_sop_instance_uid, status
);
}
0xFF00 | 0xFF01 => {
warn!(
"Possible issue storing instance `{}`: status is pending (status code {:04X}H)",
storage_sop_instance_uid, status
);
}
0xFE00 => {
error!(
"Could not store instance `{}`: operation cancelled",
storage_sop_instance_uid
);
if fail_first {
let _ = scu.abort();
std::process::exit(-2);
}
}
_ => {
error!(
"Failed to store instance `{}` (status code {:04X}H)",
storage_sop_instance_uid, status
);
if fail_first {
let _ = scu.abort();
std::process::exit(-2);
}
}
}
}
pdu @ Pdu::Unknown {.. }
| pdu @ Pdu::AssociationRQ {.. }
| pdu @ Pdu::AssociationAC {.. }
| pdu @ Pdu::AssociationRJ {.. }
| pdu @ Pdu::ReleaseRQ
| pdu @ Pdu::ReleaseRP
| pdu @ Pdu::AbortRQ {.. } => {
error!("Unexpected SCP response: {:?}", pdu);
let _ = scu.abort();
std::process::exit(-2);
}
}
}
if let Some(pb) = progress_bar.as_ref() {
pb.inc(1)
};
}
if let Some(pb) = progress_bar {
pb.finish_with_message("done")
};
scu.release()
.whatever_context("Failed to release SCU association")?;
Ok(())
}
fn store_req_command(
storage_sop_class_uid: &str,
storage_sop_instance_uid: &str,
message_id: u16,
) -> InMemDicomObject<StandardDataDictionary> {
InMemDicomObject::command_from_element_iter([
// SOP Class UID
DataElement::new(
tags::AFFECTED_SOP_CLASS_UID,
VR::UI,
dicom_value!(Str, storage_sop_class_uid),
),
// command field
DataElement::new(tags::COMMAND_FIELD, VR::US, dicom_value!(U16, [0x0001])),
// message ID
DataElement::new(tags::MESSAGE_ID, VR::US, dicom_value!(U16, [message_id])),
//priority
DataElement::new(tags::PRIORITY, VR::US, dicom_value!(U16, [0x0000])),
// data set type
DataElement::new(
tags::COMMAND_DATA_SET_TYPE,
VR::US,
dicom_value!(U16, [0x0000]),
),
// affected SOP Instance UID
DataElement::new(
tags::AFFECTED_SOP_INSTANCE_UID,
VR::UI,
dicom_value!(Str, storage_sop_instance_uid),
),
])
}
fn check_file(file: &Path) -> Result<DicomFile, Error> {
// Ignore DICOMDIR files until better support is added
let _ = (file.file_name()!= Some(OsStr::new("DICOMDIR")))
.then_some(false)
.whatever_context("DICOMDIR file not supported")?;
let dicom_file = dicom_object::OpenFileOptions::new()
.read_until(Tag(0x0001, 0x000))
.open_file(file)
.with_whatever_context(|_| format!("Could not open DICOM file {}", file.display()))?;
let meta = dicom_file.meta();
let storage_sop_class_uid = &meta.media_storage_sop_class_uid;
let storage_sop_instance_uid = &meta.media_storage_sop_instance_uid;
let transfer_syntax_uid = &meta.transfer_syntax.trim_end_matches('\0');
let ts = TransferSyntaxRegistry
.get(transfer_syntax_uid)
.whatever_context("Unsupported file transfer syntax")?;
Ok(DicomFile {
file: file.to_path_buf(),
sop_class_uid: storage_sop_class_uid.to_string(),
sop_instance_uid: storage_sop_instance_uid.to_string(),
file_transfer_syntax: String::from(ts.uid()),
ts_selected: None,
pc_selected: None,
})
}
fn check_presentation_contexts(
file: &DicomFile,
pcs: &[dicom_ul::pdu::PresentationContextResult],
) -> Result<(dicom_ul::pdu::PresentationContextResult, String), Error> {
let file_ts = TransferSyntaxRegistry
.get(&file.file_transfer_syntax)
.whatever_context("Unsupported file transfer syntax")?;
// TODO(#106) transfer syntax conversion is currently not supported
let pc = pcs
.iter()
.
|
.whatever_context("Unsupported file transfer syntax")?;
dicom_file
.write_dataset_with_ts(&mut object_data, ts_selected)
.whatever_context("Could not write object dataset")?;
|
random_line_split
|
args.rs
|
//! Handle `cargo add` arguments
use cargo_edit::{find, registry_url, Dependency};
use cargo_edit::{get_latest_dependency, CrateName};
use semver;
use std::path::PathBuf;
use structopt::StructOpt;
use crate::errors::*;
#[derive(Debug, StructOpt)]
#[structopt(bin_name = "cargo")]
pub enum
|
{
/// Add dependency to a Cargo.toml manifest file.
#[structopt(name = "add")]
#[structopt(
after_help = "This command allows you to add a dependency to a Cargo.toml manifest file. If <crate> is a github
or gitlab repository URL, or a local path, `cargo add` will try to automatically get the crate name
and set the appropriate `--git` or `--path` value.
Please note that Cargo treats versions like '1.2.3' as '^1.2.3' (and that '^1.2.3' is specified
as '>=1.2.3 and <2.0.0'). By default, `cargo add` will use this format, as it is the one that the
crates.io registry suggests. One goal of `cargo add` is to prevent you from using wildcard
dependencies (version set to '*')."
)]
Add(Args),
}
#[derive(Debug, StructOpt)]
pub struct Args {
/// Crates to be added.
#[structopt(name = "crate", required = true)]
pub crates: Vec<String>,
/// Rename a dependency in Cargo.toml,
/// https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#renaming-dependencies-in-cargotoml
/// Only works when specifying a single dependency.
#[structopt(long = "rename", short = "r")]
pub rename: Option<String>,
/// Add crate as development dependency.
#[structopt(long = "dev", short = "D", conflicts_with = "build")]
pub dev: bool,
/// Add crate as build dependency.
#[structopt(long = "build", short = "B", conflicts_with = "dev")]
pub build: bool,
/// Specify the version to grab from the registry(crates.io).
/// You can also specify version as part of name, e.g
/// `cargo add [email protected]`.
#[structopt(long = "vers", value_name = "uri", conflicts_with = "git")]
pub vers: Option<String>,
/// Specify a git repository to download the crate from.
#[structopt(
long = "git",
value_name = "uri",
conflicts_with = "vers",
conflicts_with = "path"
)]
pub git: Option<String>,
/// Specify a git branch to download the crate from.
#[structopt(
long = "branch",
value_name = "branch",
conflicts_with = "vers",
conflicts_with = "path"
)]
pub branch: Option<String>,
/// Specify the path the crate should be loaded from.
#[structopt(long = "path", conflicts_with = "git")]
pub path: Option<PathBuf>,
/// Add as dependency to the given target platform.
#[structopt(long = "target", conflicts_with = "dev", conflicts_with = "build")]
pub target: Option<String>,
/// Add as an optional dependency (for use in features).
#[structopt(long = "optional", conflicts_with = "dev", conflicts_with = "build")]
pub optional: bool,
/// Path to the manifest to add a dependency to.
#[structopt(long = "manifest-path", value_name = "path", conflicts_with = "pkgid")]
pub manifest_path: Option<PathBuf>,
/// Package id of the crate to add this dependency to.
#[structopt(
long = "package",
short = "p",
value_name = "pkgid",
conflicts_with = "path"
)]
pub pkgid: Option<String>,
/// Choose method of semantic version upgrade. Must be one of "none" (exact version, `=`
/// modifier), "patch" (`~` modifier), "minor" (`^` modifier), "all" (`>=`), or "default" (no
/// modifier).
#[structopt(
long = "upgrade",
value_name = "method",
possible_value = "none",
possible_value = "patch",
possible_value = "minor",
possible_value = "all",
possible_value = "default",
default_value = "default"
)]
pub upgrade: String,
/// Include prerelease versions when fetching from crates.io (e.g.
/// '0.6.0-alpha').
#[structopt(long = "allow-prerelease")]
pub allow_prerelease: bool,
/// Space-separated list of features to add. For an alternative approach to
/// enabling features, consider installing the `cargo-feature` utility.
#[structopt(long = "features", number_of_values = 1)]
pub features: Option<Vec<String>>,
/// Set `default-features = false` for the added dependency.
#[structopt(long = "no-default-features")]
pub no_default_features: bool,
/// Do not print any output in case of success.
#[structopt(long = "quiet", short = "q")]
pub quiet: bool,
/// Run without accessing the network
#[structopt(long = "offline")]
pub offline: bool,
/// Keep dependencies sorted
#[structopt(long = "sort", short = "s")]
pub sort: bool,
/// Registry to use
#[structopt(long = "registry", conflicts_with = "git", conflicts_with = "path")]
pub registry: Option<String>,
}
fn parse_version_req(s: &str) -> Result<&str> {
semver::VersionReq::parse(s).chain_err(|| "Invalid dependency version requirement")?;
Ok(s)
}
impl Args {
/// Get dependency section
pub fn get_section(&self) -> Vec<String> {
if self.dev {
vec!["dev-dependencies".to_owned()]
} else if self.build {
vec!["build-dependencies".to_owned()]
} else if let Some(ref target) = self.target {
if target.is_empty() {
panic!("Target specification may not be empty");
}
vec![
"target".to_owned(),
target.clone(),
"dependencies".to_owned(),
]
} else {
vec!["dependencies".to_owned()]
}
}
fn parse_single_dependency(&self, crate_name: &str) -> Result<Dependency> {
let crate_name = CrateName::new(crate_name);
if let Some(mut dependency) = crate_name.parse_as_version()? {
// crate specifier includes a version (e.g. `[email protected]`)
if let Some(ref url) = self.git {
let url = url.clone();
let version = dependency.version().unwrap().to_string();
return Err(ErrorKind::GitUrlWithVersion(url, version).into());
}
if let Some(ref path) = self.path {
dependency = dependency.set_path(path.to_str().unwrap());
}
Ok(dependency)
} else if crate_name.is_url_or_path() {
Ok(crate_name.parse_crate_name_from_uri()?)
} else {
assert_eq!(self.git.is_some() && self.vers.is_some(), false);
assert_eq!(self.git.is_some() && self.path.is_some(), false);
assert_eq!(self.git.is_some() && self.registry.is_some(), false);
assert_eq!(self.path.is_some() && self.registry.is_some(), false);
let mut dependency = Dependency::new(crate_name.name());
if let Some(repo) = &self.git {
dependency = dependency.set_git(repo, self.branch.clone());
}
if let Some(path) = &self.path {
dependency = dependency.set_path(path.to_str().unwrap());
}
if let Some(version) = &self.vers {
dependency = dependency.set_version(parse_version_req(version)?);
}
let registry_url = if let Some(registry) = &self.registry {
Some(registry_url(&find(&self.manifest_path)?, Some(registry))?)
} else {
None
};
if self.git.is_none() && self.path.is_none() && self.vers.is_none() {
let dep = get_latest_dependency(
crate_name.name(),
self.allow_prerelease,
&find(&self.manifest_path)?,
®istry_url,
)?;
let v = format!(
"{prefix}{version}",
prefix = self.get_upgrade_prefix(),
// If version is unavailable `get_latest_dependency` must have
// returned `Err(FetchVersionError::GetVersion)`
version = dep.version().unwrap_or_else(|| unreachable!())
);
dependency = dep.set_version(&v);
}
// Set the registry after getting the latest version as
// get_latest_dependency returns a registry-less Dependency
if let Some(registry) = &self.registry {
dependency = dependency.set_registry(registry);
}
Ok(dependency)
}
}
/// Build dependencies from arguments
pub fn parse_dependencies(&self) -> Result<Vec<Dependency>> {
if self.crates.len() > 1
&& (self.git.is_some() || self.path.is_some() || self.vers.is_some())
{
return Err(ErrorKind::MultipleCratesWithGitOrPathOrVers.into());
}
if self.crates.len() > 1 && self.rename.is_some() {
return Err(ErrorKind::MultipleCratesWithRename.into());
}
if self.crates.len() > 1 && self.features.is_some() {
return Err(ErrorKind::MultipleCratesWithFeatures.into());
}
self.crates
.iter()
.map(|crate_name| {
self.parse_single_dependency(crate_name).map(|x| {
let mut x = x
.set_optional(self.optional)
.set_features(self.features.clone())
.set_default_features(!self.no_default_features);
if let Some(ref rename) = self.rename {
x = x.set_rename(rename);
}
x
})
})
.collect()
}
fn get_upgrade_prefix(&self) -> &'static str {
match self.upgrade.as_ref() {
"default" => "",
"none" => "=",
"patch" => "~",
"minor" => "^",
"all" => ">=",
_ => unreachable!(),
}
}
}
#[cfg(test)]
impl Default for Args {
fn default() -> Args {
Args {
crates: vec!["demo".to_owned()],
rename: None,
dev: false,
build: false,
vers: None,
git: None,
branch: None,
path: None,
target: None,
optional: false,
manifest_path: None,
pkgid: None,
upgrade: "minor".to_string(),
allow_prerelease: false,
features: None,
no_default_features: false,
quiet: false,
offline: true,
sort: false,
registry: None,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use cargo_edit::Dependency;
#[test]
fn test_dependency_parsing() {
let args = Args {
vers: Some("0.4.2".to_owned()),
..Args::default()
};
assert_eq!(
args.parse_dependencies().unwrap(),
vec![Dependency::new("demo").set_version("0.4.2")]
);
}
#[test]
#[cfg(feature = "test-external-apis")]
fn test_repo_as_arg_parsing() {
let github_url = "https://github.com/killercup/cargo-edit/";
let args_github = Args {
crates: vec![github_url.to_owned()],
..Args::default()
};
assert_eq!(
args_github.parse_dependencies().unwrap(),
vec![Dependency::new("cargo-edit").set_git(github_url, None)]
);
let gitlab_url = "https://gitlab.com/Polly-lang/Polly.git";
let args_gitlab = Args {
crates: vec![gitlab_url.to_owned()],
..Args::default()
};
assert_eq!(
args_gitlab.parse_dependencies().unwrap(),
vec![Dependency::new("polly").set_git(gitlab_url, None)]
);
}
#[test]
fn test_path_as_arg_parsing() {
let self_path = ".";
let args_path = Args {
crates: vec![self_path.to_owned()],
..Args::default()
};
assert_eq!(
args_path.parse_dependencies().unwrap(),
vec![Dependency::new("cargo-edit").set_path(self_path)]
);
}
}
|
Command
|
identifier_name
|
args.rs
|
//! Handle `cargo add` arguments
use cargo_edit::{find, registry_url, Dependency};
use cargo_edit::{get_latest_dependency, CrateName};
use semver;
use std::path::PathBuf;
use structopt::StructOpt;
use crate::errors::*;
#[derive(Debug, StructOpt)]
#[structopt(bin_name = "cargo")]
pub enum Command {
/// Add dependency to a Cargo.toml manifest file.
#[structopt(name = "add")]
#[structopt(
after_help = "This command allows you to add a dependency to a Cargo.toml manifest file. If <crate> is a github
or gitlab repository URL, or a local path, `cargo add` will try to automatically get the crate name
and set the appropriate `--git` or `--path` value.
Please note that Cargo treats versions like '1.2.3' as '^1.2.3' (and that '^1.2.3' is specified
as '>=1.2.3 and <2.0.0'). By default, `cargo add` will use this format, as it is the one that the
crates.io registry suggests. One goal of `cargo add` is to prevent you from using wildcard
dependencies (version set to '*')."
)]
Add(Args),
}
#[derive(Debug, StructOpt)]
pub struct Args {
/// Crates to be added.
#[structopt(name = "crate", required = true)]
pub crates: Vec<String>,
/// Rename a dependency in Cargo.toml,
/// https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#renaming-dependencies-in-cargotoml
/// Only works when specifying a single dependency.
#[structopt(long = "rename", short = "r")]
pub rename: Option<String>,
/// Add crate as development dependency.
#[structopt(long = "dev", short = "D", conflicts_with = "build")]
pub dev: bool,
/// Add crate as build dependency.
#[structopt(long = "build", short = "B", conflicts_with = "dev")]
pub build: bool,
/// Specify the version to grab from the registry(crates.io).
/// You can also specify version as part of name, e.g
/// `cargo add [email protected]`.
#[structopt(long = "vers", value_name = "uri", conflicts_with = "git")]
pub vers: Option<String>,
/// Specify a git repository to download the crate from.
#[structopt(
long = "git",
value_name = "uri",
conflicts_with = "vers",
conflicts_with = "path"
)]
pub git: Option<String>,
/// Specify a git branch to download the crate from.
#[structopt(
long = "branch",
value_name = "branch",
conflicts_with = "vers",
conflicts_with = "path"
)]
pub branch: Option<String>,
/// Specify the path the crate should be loaded from.
#[structopt(long = "path", conflicts_with = "git")]
pub path: Option<PathBuf>,
/// Add as dependency to the given target platform.
#[structopt(long = "target", conflicts_with = "dev", conflicts_with = "build")]
pub target: Option<String>,
/// Add as an optional dependency (for use in features).
#[structopt(long = "optional", conflicts_with = "dev", conflicts_with = "build")]
pub optional: bool,
/// Path to the manifest to add a dependency to.
#[structopt(long = "manifest-path", value_name = "path", conflicts_with = "pkgid")]
pub manifest_path: Option<PathBuf>,
/// Package id of the crate to add this dependency to.
#[structopt(
long = "package",
short = "p",
value_name = "pkgid",
conflicts_with = "path"
)]
pub pkgid: Option<String>,
/// Choose method of semantic version upgrade. Must be one of "none" (exact version, `=`
/// modifier), "patch" (`~` modifier), "minor" (`^` modifier), "all" (`>=`), or "default" (no
/// modifier).
#[structopt(
long = "upgrade",
value_name = "method",
possible_value = "none",
possible_value = "patch",
possible_value = "minor",
possible_value = "all",
possible_value = "default",
default_value = "default"
)]
pub upgrade: String,
/// Include prerelease versions when fetching from crates.io (e.g.
/// '0.6.0-alpha').
#[structopt(long = "allow-prerelease")]
pub allow_prerelease: bool,
/// Space-separated list of features to add. For an alternative approach to
/// enabling features, consider installing the `cargo-feature` utility.
#[structopt(long = "features", number_of_values = 1)]
pub features: Option<Vec<String>>,
/// Set `default-features = false` for the added dependency.
#[structopt(long = "no-default-features")]
pub no_default_features: bool,
/// Do not print any output in case of success.
#[structopt(long = "quiet", short = "q")]
pub quiet: bool,
/// Run without accessing the network
#[structopt(long = "offline")]
pub offline: bool,
/// Keep dependencies sorted
#[structopt(long = "sort", short = "s")]
pub sort: bool,
/// Registry to use
#[structopt(long = "registry", conflicts_with = "git", conflicts_with = "path")]
pub registry: Option<String>,
}
fn parse_version_req(s: &str) -> Result<&str> {
semver::VersionReq::parse(s).chain_err(|| "Invalid dependency version requirement")?;
Ok(s)
}
impl Args {
/// Get dependency section
pub fn get_section(&self) -> Vec<String> {
if self.dev {
vec!["dev-dependencies".to_owned()]
} else if self.build {
vec!["build-dependencies".to_owned()]
} else if let Some(ref target) = self.target {
if target.is_empty() {
panic!("Target specification may not be empty");
}
vec![
"target".to_owned(),
target.clone(),
"dependencies".to_owned(),
]
} else {
vec!["dependencies".to_owned()]
}
}
fn parse_single_dependency(&self, crate_name: &str) -> Result<Dependency> {
let crate_name = CrateName::new(crate_name);
if let Some(mut dependency) = crate_name.parse_as_version()? {
// crate specifier includes a version (e.g. `[email protected]`)
if let Some(ref url) = self.git {
let url = url.clone();
let version = dependency.version().unwrap().to_string();
return Err(ErrorKind::GitUrlWithVersion(url, version).into());
}
if let Some(ref path) = self.path {
dependency = dependency.set_path(path.to_str().unwrap());
}
Ok(dependency)
} else if crate_name.is_url_or_path() {
Ok(crate_name.parse_crate_name_from_uri()?)
} else
|
None
};
if self.git.is_none() && self.path.is_none() && self.vers.is_none() {
let dep = get_latest_dependency(
crate_name.name(),
self.allow_prerelease,
&find(&self.manifest_path)?,
®istry_url,
)?;
let v = format!(
"{prefix}{version}",
prefix = self.get_upgrade_prefix(),
// If version is unavailable `get_latest_dependency` must have
// returned `Err(FetchVersionError::GetVersion)`
version = dep.version().unwrap_or_else(|| unreachable!())
);
dependency = dep.set_version(&v);
}
// Set the registry after getting the latest version as
// get_latest_dependency returns a registry-less Dependency
if let Some(registry) = &self.registry {
dependency = dependency.set_registry(registry);
}
Ok(dependency)
}
}
/// Build dependencies from arguments
pub fn parse_dependencies(&self) -> Result<Vec<Dependency>> {
if self.crates.len() > 1
&& (self.git.is_some() || self.path.is_some() || self.vers.is_some())
{
return Err(ErrorKind::MultipleCratesWithGitOrPathOrVers.into());
}
if self.crates.len() > 1 && self.rename.is_some() {
return Err(ErrorKind::MultipleCratesWithRename.into());
}
if self.crates.len() > 1 && self.features.is_some() {
return Err(ErrorKind::MultipleCratesWithFeatures.into());
}
self.crates
.iter()
.map(|crate_name| {
self.parse_single_dependency(crate_name).map(|x| {
let mut x = x
.set_optional(self.optional)
.set_features(self.features.clone())
.set_default_features(!self.no_default_features);
if let Some(ref rename) = self.rename {
x = x.set_rename(rename);
}
x
})
})
.collect()
}
fn get_upgrade_prefix(&self) -> &'static str {
match self.upgrade.as_ref() {
"default" => "",
"none" => "=",
"patch" => "~",
"minor" => "^",
"all" => ">=",
_ => unreachable!(),
}
}
}
#[cfg(test)]
impl Default for Args {
fn default() -> Args {
Args {
crates: vec!["demo".to_owned()],
rename: None,
dev: false,
build: false,
vers: None,
git: None,
branch: None,
path: None,
target: None,
optional: false,
manifest_path: None,
pkgid: None,
upgrade: "minor".to_string(),
allow_prerelease: false,
features: None,
no_default_features: false,
quiet: false,
offline: true,
sort: false,
registry: None,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use cargo_edit::Dependency;
#[test]
fn test_dependency_parsing() {
let args = Args {
vers: Some("0.4.2".to_owned()),
..Args::default()
};
assert_eq!(
args.parse_dependencies().unwrap(),
vec![Dependency::new("demo").set_version("0.4.2")]
);
}
#[test]
#[cfg(feature = "test-external-apis")]
fn test_repo_as_arg_parsing() {
let github_url = "https://github.com/killercup/cargo-edit/";
let args_github = Args {
crates: vec![github_url.to_owned()],
..Args::default()
};
assert_eq!(
args_github.parse_dependencies().unwrap(),
vec![Dependency::new("cargo-edit").set_git(github_url, None)]
);
let gitlab_url = "https://gitlab.com/Polly-lang/Polly.git";
let args_gitlab = Args {
crates: vec![gitlab_url.to_owned()],
..Args::default()
};
assert_eq!(
args_gitlab.parse_dependencies().unwrap(),
vec![Dependency::new("polly").set_git(gitlab_url, None)]
);
}
#[test]
fn test_path_as_arg_parsing() {
let self_path = ".";
let args_path = Args {
crates: vec![self_path.to_owned()],
..Args::default()
};
assert_eq!(
args_path.parse_dependencies().unwrap(),
vec![Dependency::new("cargo-edit").set_path(self_path)]
);
}
}
|
{
assert_eq!(self.git.is_some() && self.vers.is_some(), false);
assert_eq!(self.git.is_some() && self.path.is_some(), false);
assert_eq!(self.git.is_some() && self.registry.is_some(), false);
assert_eq!(self.path.is_some() && self.registry.is_some(), false);
let mut dependency = Dependency::new(crate_name.name());
if let Some(repo) = &self.git {
dependency = dependency.set_git(repo, self.branch.clone());
}
if let Some(path) = &self.path {
dependency = dependency.set_path(path.to_str().unwrap());
}
if let Some(version) = &self.vers {
dependency = dependency.set_version(parse_version_req(version)?);
}
let registry_url = if let Some(registry) = &self.registry {
Some(registry_url(&find(&self.manifest_path)?, Some(registry))?)
} else {
|
conditional_block
|
args.rs
|
//! Handle `cargo add` arguments
use cargo_edit::{find, registry_url, Dependency};
use cargo_edit::{get_latest_dependency, CrateName};
use semver;
use std::path::PathBuf;
use structopt::StructOpt;
use crate::errors::*;
#[derive(Debug, StructOpt)]
#[structopt(bin_name = "cargo")]
pub enum Command {
/// Add dependency to a Cargo.toml manifest file.
#[structopt(name = "add")]
#[structopt(
after_help = "This command allows you to add a dependency to a Cargo.toml manifest file. If <crate> is a github
or gitlab repository URL, or a local path, `cargo add` will try to automatically get the crate name
and set the appropriate `--git` or `--path` value.
Please note that Cargo treats versions like '1.2.3' as '^1.2.3' (and that '^1.2.3' is specified
as '>=1.2.3 and <2.0.0'). By default, `cargo add` will use this format, as it is the one that the
crates.io registry suggests. One goal of `cargo add` is to prevent you from using wildcard
dependencies (version set to '*')."
)]
Add(Args),
}
// Parsed arguments for `cargo add`.
// NOTE(review): the `///` doc comments on fields double as `--help` text via
// structopt, so they are intentionally left byte-identical; review notes use
// plain `//` comments, which structopt ignores.
#[derive(Debug, StructOpt)]
pub struct Args {
    /// Crates to be added.
    #[structopt(name = "crate", required = true)]
    pub crates: Vec<String>,
    /// Rename a dependency in Cargo.toml,
    /// https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#renaming-dependencies-in-cargotoml
    /// Only works when specifying a single dependency.
    #[structopt(long = "rename", short = "r")]
    pub rename: Option<String>,
    /// Add crate as development dependency.
    #[structopt(long = "dev", short = "D", conflicts_with = "build")]
    pub dev: bool,
    /// Add crate as build dependency.
    #[structopt(long = "build", short = "B", conflicts_with = "dev")]
    pub build: bool,
    /// Specify the version to grab from the registry(crates.io).
    /// You can also specify version as part of name, e.g
    /// `cargo add [email protected]`.
    #[structopt(long = "vers", value_name = "uri", conflicts_with = "git")]
    pub vers: Option<String>,
    /// Specify a git repository to download the crate from.
    #[structopt(
        long = "git",
        value_name = "uri",
        conflicts_with = "vers",
        conflicts_with = "path"
    )]
    pub git: Option<String>,
    /// Specify a git branch to download the crate from.
    #[structopt(
        long = "branch",
        value_name = "branch",
        conflicts_with = "vers",
        conflicts_with = "path"
    )]
    pub branch: Option<String>,
    /// Specify the path the crate should be loaded from.
    #[structopt(long = "path", conflicts_with = "git")]
    pub path: Option<PathBuf>,
    /// Add as dependency to the given target platform.
    #[structopt(long = "target", conflicts_with = "dev", conflicts_with = "build")]
    pub target: Option<String>,
    /// Add as an optional dependency (for use in features).
    #[structopt(long = "optional", conflicts_with = "dev", conflicts_with = "build")]
    pub optional: bool,
    /// Path to the manifest to add a dependency to.
    #[structopt(long = "manifest-path", value_name = "path", conflicts_with = "pkgid")]
    pub manifest_path: Option<PathBuf>,
    /// Package id of the crate to add this dependency to.
    #[structopt(
        long = "package",
        short = "p",
        value_name = "pkgid",
        conflicts_with = "path"
    )]
    pub pkgid: Option<String>,
    /// Choose method of semantic version upgrade. Must be one of "none" (exact version, `=`
    /// modifier), "patch" (`~` modifier), "minor" (`^` modifier), "all" (`>=`), or "default" (no
    /// modifier).
    #[structopt(
        long = "upgrade",
        value_name = "method",
        possible_value = "none",
        possible_value = "patch",
        possible_value = "minor",
        possible_value = "all",
        possible_value = "default",
        default_value = "default"
    )]
    pub upgrade: String,
    /// Include prerelease versions when fetching from crates.io (e.g.
    /// '0.6.0-alpha').
    #[structopt(long = "allow-prerelease")]
    pub allow_prerelease: bool,
    /// Space-separated list of features to add. For an alternative approach to
    /// enabling features, consider installing the `cargo-feature` utility.
    #[structopt(long = "features", number_of_values = 1)]
    pub features: Option<Vec<String>>,
    /// Set `default-features = false` for the added dependency.
    #[structopt(long = "no-default-features")]
    pub no_default_features: bool,
    /// Do not print any output in case of success.
    #[structopt(long = "quiet", short = "q")]
    pub quiet: bool,
    /// Run without accessing the network
    #[structopt(long = "offline")]
    pub offline: bool,
    /// Keep dependencies sorted
    #[structopt(long = "sort", short = "s")]
    pub sort: bool,
    /// Registry to use
    #[structopt(long = "registry", conflicts_with = "git", conflicts_with = "path")]
    pub registry: Option<String>,
}
/// Validate that `s` parses as a semver version requirement and hand the
/// original string back unchanged, so callers keep the user's exact spelling.
fn parse_version_req(s: &str) -> Result<&str> {
    semver::VersionReq::parse(s)
        .map(|_| s)
        .chain_err(|| "Invalid dependency version requirement")
}
impl Args {
/// Resolve which manifest table the new dependency belongs in.
///
/// Returns the TOML key path: `["dev-dependencies"]`, `["build-dependencies"]`,
/// `["target", <triple>, "dependencies"]`, or plain `["dependencies"]`.
pub fn get_section(&self) -> Vec<String> {
    // Guard-clause form: the two boolean flags short-circuit first.
    if self.dev {
        return vec!["dev-dependencies".to_owned()];
    }
    if self.build {
        return vec!["build-dependencies".to_owned()];
    }
    match self.target {
        Some(ref target) => {
            if target.is_empty() {
                panic!("Target specification may not be empty");
            }
            vec![
                "target".to_owned(),
                target.clone(),
                "dependencies".to_owned(),
            ]
        }
        None => vec!["dependencies".to_owned()],
    }
}
/// Builds a `Dependency` for one `<crate>` argument, combining it with the
/// command-line flags (`--git`, `--path`, `--vers`, `--registry`, ...).
///
/// # Errors
/// Fails when a `name@version` specifier is combined with `--git`, or when
/// manifest discovery / registry lookup / version parsing fails.
fn parse_single_dependency(&self, crate_name: &str) -> Result<Dependency> {
    let crate_name = CrateName::new(crate_name);
    if let Some(mut dependency) = crate_name.parse_as_version()? {
        // crate specifier includes a version (e.g. `[email protected]`)
        if let Some(ref url) = self.git {
            let url = url.clone();
            let version = dependency.version().unwrap().to_string();
            return Err(ErrorKind::GitUrlWithVersion(url, version).into());
        }
        if let Some(ref path) = self.path {
            dependency = dependency.set_path(path.to_str().unwrap());
        }
        Ok(dependency)
    } else if crate_name.is_url_or_path() {
        Ok(crate_name.parse_crate_name_from_uri()?)
    } else {
        // These combinations are already rejected by structopt's
        // `conflicts_with` declarations; the asserts only guard against the
        // CLI definition drifting out of sync with this logic.
        assert!(!(self.git.is_some() && self.vers.is_some()));
        assert!(!(self.git.is_some() && self.path.is_some()));
        assert!(!(self.git.is_some() && self.registry.is_some()));
        assert!(!(self.path.is_some() && self.registry.is_some()));
        let mut dependency = Dependency::new(crate_name.name());
        if let Some(repo) = &self.git {
            dependency = dependency.set_git(repo, self.branch.clone());
        }
        if let Some(path) = &self.path {
            dependency = dependency.set_path(path.to_str().unwrap());
        }
        if let Some(version) = &self.vers {
            dependency = dependency.set_version(parse_version_req(version)?);
        }
        let registry_url = if let Some(registry) = &self.registry {
            Some(registry_url(&find(&self.manifest_path)?, Some(registry))?)
        } else {
            None
        };
        if self.git.is_none() && self.path.is_none() && self.vers.is_none() {
            let dep = get_latest_dependency(
                crate_name.name(),
                self.allow_prerelease,
                &find(&self.manifest_path)?,
                // FIX: was mojibake `®istry_url` (a mis-decoded `&registry_url`),
                // which is not valid Rust.
                &registry_url,
            )?;
            let v = format!(
                "{prefix}{version}",
                prefix = self.get_upgrade_prefix(),
                // If version is unavailable `get_latest_dependency` must have
                // returned `Err(FetchVersionError::GetVersion)`
                version = dep.version().unwrap_or_else(|| unreachable!())
            );
            dependency = dep.set_version(&v);
        }
        // Set the registry after getting the latest version as
        // get_latest_dependency returns a registry-less Dependency
        if let Some(registry) = &self.registry {
            dependency = dependency.set_registry(registry);
        }
        Ok(dependency)
    }
}
/// Build dependencies from arguments
pub fn parse_dependencies(&self) -> Result<Vec<Dependency>>
|
.set_optional(self.optional)
.set_features(self.features.clone())
.set_default_features(!self.no_default_features);
if let Some(ref rename) = self.rename {
x = x.set_rename(rename);
}
x
})
})
.collect()
}
/// Map the `--upgrade <method>` choice to its semver requirement prefix.
fn get_upgrade_prefix(&self) -> &'static str {
    let method: &str = &self.upgrade;
    match method {
        "none" => "=",
        "patch" => "~",
        "minor" => "^",
        "all" => ">=",
        "default" => "",
        // structopt's `possible_value` list restricts `--upgrade` to the
        // arms above, so anything else cannot occur.
        _ => unreachable!(),
    }
}
}
// Test-only baseline: a single crate "demo" with every flag off.
// Note two deliberate differences from the CLI defaults: `offline` is true
// (tests must not hit the network) and `upgrade` is "minor", not "default".
#[cfg(test)]
impl Default for Args {
    fn default() -> Args {
        Args {
            crates: vec!["demo".to_owned()],
            rename: None,
            dev: false,
            build: false,
            vers: None,
            git: None,
            branch: None,
            path: None,
            target: None,
            optional: false,
            manifest_path: None,
            pkgid: None,
            upgrade: "minor".to_string(),
            allow_prerelease: false,
            features: None,
            no_default_features: false,
            quiet: false,
            offline: true,
            sort: false,
            registry: None,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use cargo_edit::Dependency;
    // `--vers 0.4.2` should yield a dependency pinned to that requirement.
    #[test]
    fn test_dependency_parsing() {
        let args = Args {
            vers: Some("0.4.2".to_owned()),
            ..Args::default()
        };
        assert_eq!(
            args.parse_dependencies().unwrap(),
            vec![Dependency::new("demo").set_version("0.4.2")]
        );
    }
    // Repository URLs as the <crate> argument should be resolved to a git
    // dependency with the crate name inferred from the repo. Network-bound,
    // hence behind the `test-external-apis` feature.
    #[test]
    #[cfg(feature = "test-external-apis")]
    fn test_repo_as_arg_parsing() {
        let github_url = "https://github.com/killercup/cargo-edit/";
        let args_github = Args {
            crates: vec![github_url.to_owned()],
            ..Args::default()
        };
        assert_eq!(
            args_github.parse_dependencies().unwrap(),
            vec![Dependency::new("cargo-edit").set_git(github_url, None)]
        );
        let gitlab_url = "https://gitlab.com/Polly-lang/Polly.git";
        let args_gitlab = Args {
            crates: vec![gitlab_url.to_owned()],
            ..Args::default()
        };
        assert_eq!(
            args_gitlab.parse_dependencies().unwrap(),
            vec![Dependency::new("polly").set_git(gitlab_url, None)]
        );
    }
    // A local path as the <crate> argument becomes a path dependency named
    // after the crate found at that path (here: this crate itself).
    #[test]
    fn test_path_as_arg_parsing() {
        let self_path = ".";
        let args_path = Args {
            crates: vec![self_path.to_owned()],
            ..Args::default()
        };
        assert_eq!(
            args_path.parse_dependencies().unwrap(),
            vec![Dependency::new("cargo-edit").set_path(self_path)]
        );
    }
}
|
{
if self.crates.len() > 1
&& (self.git.is_some() || self.path.is_some() || self.vers.is_some())
{
return Err(ErrorKind::MultipleCratesWithGitOrPathOrVers.into());
}
if self.crates.len() > 1 && self.rename.is_some() {
return Err(ErrorKind::MultipleCratesWithRename.into());
}
if self.crates.len() > 1 && self.features.is_some() {
return Err(ErrorKind::MultipleCratesWithFeatures.into());
}
self.crates
.iter()
.map(|crate_name| {
self.parse_single_dependency(crate_name).map(|x| {
let mut x = x
|
identifier_body
|
args.rs
|
//! Handle `cargo add` arguments
use cargo_edit::{find, registry_url, Dependency};
use cargo_edit::{get_latest_dependency, CrateName};
use semver;
use std::path::PathBuf;
use structopt::StructOpt;
use crate::errors::*;
// Top-level CLI wrapper: modelled as a `cargo` subcommand (`bin_name = "cargo"`)
// so invocation reads `cargo add <crate>...`.
#[derive(Debug, StructOpt)]
#[structopt(bin_name = "cargo")]
pub enum Command {
    /// Add dependency to a Cargo.toml manifest file.
    #[structopt(name = "add")]
    #[structopt(
        after_help = "This command allows you to add a dependency to a Cargo.toml manifest file. If <crate> is a github
or gitlab repository URL, or a local path, `cargo add` will try to automatically get the crate name
and set the appropriate `--git` or `--path` value.
Please note that Cargo treats versions like '1.2.3' as '^1.2.3' (and that '^1.2.3' is specified
as '>=1.2.3 and <2.0.0'). By default, `cargo add` will use this format, as it is the one that the
crates.io registry suggests. One goal of `cargo add` is to prevent you from using wildcard
dependencies (version set to '*')."
    )]
    Add(Args),
}
#[derive(Debug, StructOpt)]
pub struct Args {
/// Crates to be added.
#[structopt(name = "crate", required = true)]
pub crates: Vec<String>,
/// Rename a dependency in Cargo.toml,
/// https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#renaming-dependencies-in-cargotoml
/// Only works when specifying a single dependency.
#[structopt(long = "rename", short = "r")]
pub rename: Option<String>,
/// Add crate as development dependency.
#[structopt(long = "dev", short = "D", conflicts_with = "build")]
pub dev: bool,
/// Add crate as build dependency.
#[structopt(long = "build", short = "B", conflicts_with = "dev")]
pub build: bool,
/// Specify the version to grab from the registry(crates.io).
/// You can also specify version as part of name, e.g
/// `cargo add [email protected]`.
#[structopt(long = "vers", value_name = "uri", conflicts_with = "git")]
pub vers: Option<String>,
/// Specify a git repository to download the crate from.
#[structopt(
long = "git",
value_name = "uri",
conflicts_with = "vers",
conflicts_with = "path"
)]
pub git: Option<String>,
/// Specify a git branch to download the crate from.
#[structopt(
long = "branch",
value_name = "branch",
conflicts_with = "vers",
conflicts_with = "path"
)]
pub branch: Option<String>,
/// Specify the path the crate should be loaded from.
#[structopt(long = "path", conflicts_with = "git")]
pub path: Option<PathBuf>,
|
#[structopt(long = "target", conflicts_with = "dev", conflicts_with = "build")]
pub target: Option<String>,
/// Add as an optional dependency (for use in features).
#[structopt(long = "optional", conflicts_with = "dev", conflicts_with = "build")]
pub optional: bool,
/// Path to the manifest to add a dependency to.
#[structopt(long = "manifest-path", value_name = "path", conflicts_with = "pkgid")]
pub manifest_path: Option<PathBuf>,
/// Package id of the crate to add this dependency to.
#[structopt(
long = "package",
short = "p",
value_name = "pkgid",
conflicts_with = "path"
)]
pub pkgid: Option<String>,
/// Choose method of semantic version upgrade. Must be one of "none" (exact version, `=`
/// modifier), "patch" (`~` modifier), "minor" (`^` modifier), "all" (`>=`), or "default" (no
/// modifier).
#[structopt(
long = "upgrade",
value_name = "method",
possible_value = "none",
possible_value = "patch",
possible_value = "minor",
possible_value = "all",
possible_value = "default",
default_value = "default"
)]
pub upgrade: String,
/// Include prerelease versions when fetching from crates.io (e.g.
/// '0.6.0-alpha').
#[structopt(long = "allow-prerelease")]
pub allow_prerelease: bool,
/// Space-separated list of features to add. For an alternative approach to
/// enabling features, consider installing the `cargo-feature` utility.
#[structopt(long = "features", number_of_values = 1)]
pub features: Option<Vec<String>>,
/// Set `default-features = false` for the added dependency.
#[structopt(long = "no-default-features")]
pub no_default_features: bool,
/// Do not print any output in case of success.
#[structopt(long = "quiet", short = "q")]
pub quiet: bool,
/// Run without accessing the network
#[structopt(long = "offline")]
pub offline: bool,
/// Keep dependencies sorted
#[structopt(long = "sort", short = "s")]
pub sort: bool,
/// Registry to use
#[structopt(long = "registry", conflicts_with = "git", conflicts_with = "path")]
pub registry: Option<String>,
}
/// Validates `s` as a semver version requirement; returns `s` on success.
fn parse_version_req(s: &str) -> Result<&str> {
    // Parse purely for validation; the original string is returned so the
    // user's exact spelling (e.g. `~1.2`) ends up in the manifest.
    semver::VersionReq::parse(s).chain_err(|| "Invalid dependency version requirement")?;
    Ok(s)
}
impl Args {
    /// Get dependency section.
    ///
    /// Maps `--dev` / `--build` / `--target <triple>` to the TOML key path
    /// of the manifest table the dependency should be written to.
    pub fn get_section(&self) -> Vec<String> {
        if self.dev {
            vec!["dev-dependencies".to_owned()]
        } else if self.build {
            vec!["build-dependencies".to_owned()]
        } else if let Some(ref target) = self.target {
            if target.is_empty() {
                panic!("Target specification may not be empty");
            }
            vec![
                "target".to_owned(),
                target.clone(),
                "dependencies".to_owned(),
            ]
        } else {
            vec!["dependencies".to_owned()]
        }
    }

    /// Builds a `Dependency` for one `<crate>` argument, combining it with
    /// the command-line flags (`--git`, `--path`, `--vers`, `--registry`).
    ///
    /// # Errors
    /// Fails when a `name@version` specifier is combined with `--git`, or
    /// when manifest discovery / registry lookup / version parsing fails.
    fn parse_single_dependency(&self, crate_name: &str) -> Result<Dependency> {
        let crate_name = CrateName::new(crate_name);
        if let Some(mut dependency) = crate_name.parse_as_version()? {
            // crate specifier includes a version (e.g. `[email protected]`)
            if let Some(ref url) = self.git {
                let url = url.clone();
                let version = dependency.version().unwrap().to_string();
                return Err(ErrorKind::GitUrlWithVersion(url, version).into());
            }
            if let Some(ref path) = self.path {
                dependency = dependency.set_path(path.to_str().unwrap());
            }
            Ok(dependency)
        } else if crate_name.is_url_or_path() {
            Ok(crate_name.parse_crate_name_from_uri()?)
        } else {
            // These combinations are already rejected by structopt's
            // `conflicts_with` declarations; the asserts only guard against
            // the CLI definition drifting out of sync with this logic.
            assert!(!(self.git.is_some() && self.vers.is_some()));
            assert!(!(self.git.is_some() && self.path.is_some()));
            assert!(!(self.git.is_some() && self.registry.is_some()));
            assert!(!(self.path.is_some() && self.registry.is_some()));
            let mut dependency = Dependency::new(crate_name.name());
            if let Some(repo) = &self.git {
                dependency = dependency.set_git(repo, self.branch.clone());
            }
            if let Some(path) = &self.path {
                dependency = dependency.set_path(path.to_str().unwrap());
            }
            if let Some(version) = &self.vers {
                dependency = dependency.set_version(parse_version_req(version)?);
            }
            let registry_url = if let Some(registry) = &self.registry {
                Some(registry_url(&find(&self.manifest_path)?, Some(registry))?)
            } else {
                None
            };
            if self.git.is_none() && self.path.is_none() && self.vers.is_none() {
                let dep = get_latest_dependency(
                    crate_name.name(),
                    self.allow_prerelease,
                    &find(&self.manifest_path)?,
                    // FIX: was mojibake `®istry_url` (mis-decoded `&registry_url`).
                    &registry_url,
                )?;
                let v = format!(
                    "{prefix}{version}",
                    prefix = self.get_upgrade_prefix(),
                    // If version is unavailable `get_latest_dependency` must have
                    // returned `Err(FetchVersionError::GetVersion)`
                    version = dep.version().unwrap_or_else(|| unreachable!())
                );
                dependency = dep.set_version(&v);
            }
            // Set the registry after getting the latest version as
            // get_latest_dependency returns a registry-less Dependency
            if let Some(registry) = &self.registry {
                dependency = dependency.set_registry(registry);
            }
            Ok(dependency)
        }
    }

    /// Build dependencies from arguments.
    ///
    /// # Errors
    /// Rejects flag combinations that are ambiguous across multiple crates
    /// (`--git`/`--path`/`--vers`, `--rename`, `--features`).
    pub fn parse_dependencies(&self) -> Result<Vec<Dependency>> {
        if self.crates.len() > 1
            && (self.git.is_some() || self.path.is_some() || self.vers.is_some())
        {
            return Err(ErrorKind::MultipleCratesWithGitOrPathOrVers.into());
        }
        if self.crates.len() > 1 && self.rename.is_some() {
            return Err(ErrorKind::MultipleCratesWithRename.into());
        }
        if self.crates.len() > 1 && self.features.is_some() {
            return Err(ErrorKind::MultipleCratesWithFeatures.into());
        }
        // `collect()` into `Result<Vec<_>>` short-circuits on the first error.
        self.crates
            .iter()
            .map(|crate_name| {
                self.parse_single_dependency(crate_name).map(|x| {
                    let mut x = x
                        .set_optional(self.optional)
                        .set_features(self.features.clone())
                        .set_default_features(!self.no_default_features);
                    if let Some(ref rename) = self.rename {
                        x = x.set_rename(rename);
                    }
                    x
                })
            })
            .collect()
    }

    /// Maps the `--upgrade <method>` choice to its semver requirement prefix.
    fn get_upgrade_prefix(&self) -> &'static str {
        match self.upgrade.as_ref() {
            "default" => "",
            "none" => "=",
            "patch" => "~",
            "minor" => "^",
            "all" => ">=",
            // structopt restricts `--upgrade` to the values above.
            _ => unreachable!(),
        }
    }
}
// Test-only baseline: a single crate "demo" with every flag off.
// Note two deliberate differences from the CLI defaults: `offline` is true
// (tests must not hit the network) and `upgrade` is "minor", not "default".
#[cfg(test)]
impl Default for Args {
    fn default() -> Args {
        Args {
            crates: vec!["demo".to_owned()],
            rename: None,
            dev: false,
            build: false,
            vers: None,
            git: None,
            branch: None,
            path: None,
            target: None,
            optional: false,
            manifest_path: None,
            pkgid: None,
            upgrade: "minor".to_string(),
            allow_prerelease: false,
            features: None,
            no_default_features: false,
            quiet: false,
            offline: true,
            sort: false,
            registry: None,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use cargo_edit::Dependency;
    // `--vers 0.4.2` should yield a dependency pinned to that requirement.
    #[test]
    fn test_dependency_parsing() {
        let args = Args {
            vers: Some("0.4.2".to_owned()),
            ..Args::default()
        };
        assert_eq!(
            args.parse_dependencies().unwrap(),
            vec![Dependency::new("demo").set_version("0.4.2")]
        );
    }
    // Repository URLs as <crate> should resolve to git dependencies with the
    // crate name inferred from the repository. Network-bound, hence gated.
    #[test]
    #[cfg(feature = "test-external-apis")]
    fn test_repo_as_arg_parsing() {
        let github_url = "https://github.com/killercup/cargo-edit/";
        let args_github = Args {
            crates: vec![github_url.to_owned()],
            ..Args::default()
        };
        assert_eq!(
            args_github.parse_dependencies().unwrap(),
            vec![Dependency::new("cargo-edit").set_git(github_url, None)]
        );
        let gitlab_url = "https://gitlab.com/Polly-lang/Polly.git";
        let args_gitlab = Args {
            crates: vec![gitlab_url.to_owned()],
            ..Args::default()
        };
        assert_eq!(
            args_gitlab.parse_dependencies().unwrap(),
            vec![Dependency::new("polly").set_git(gitlab_url, None)]
        );
    }
    // A local path as <crate> becomes a path dependency named after the
    // crate found at that path (here: this crate itself).
    #[test]
    fn test_path_as_arg_parsing() {
        let self_path = ".";
        let args_path = Args {
            crates: vec![self_path.to_owned()],
            ..Args::default()
        };
        assert_eq!(
            args_path.parse_dependencies().unwrap(),
            vec![Dependency::new("cargo-edit").set_path(self_path)]
        );
    }
}
|
/// Add as dependency to the given target platform.
|
random_line_split
|
cache.rs
|
use crate::address_map::{ModuleAddressMap, ValueLabelsRanges};
use crate::compilation::{Compilation, Relocations};
use crate::module::Module;
use crate::module_environ::FunctionBodyData;
use core::hash::Hasher;
use cranelift_codegen::{ir, isa};
use cranelift_entity::PrimaryMap;
use cranelift_wasm::DefinedFuncIndex;
use lazy_static::lazy_static;
use log::{debug, trace, warn};
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use std::fs;
use std::io;
use std::path::PathBuf;
use std::string::{String, ToString};
/// Module for configuring the cache system.
pub mod conf {
use directories::ProjectDirs;
use log::{debug, warn};
use spin::Once;
use std::fs;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering};
// Cache settings resolved once at init time; private to this module, read
// through the free functions below.
struct Config {
    pub cache_enabled: bool,    // false => lookups and writes are no-ops
    pub cache_dir: PathBuf,     // canonicalized root dir (empty when disabled)
    pub compression_level: i32, // zstd level; 0 means "zstd default"
}
// Private static, so only internal function can access it.
static CONFIG: Once<Config> = Once::new();
// One-shot guard so a second `init()` call panics instead of re-configuring.
static INIT_CALLED: AtomicBool = AtomicBool::new(false);
static DEFAULT_COMPRESSION_LEVEL: i32 = 0; // 0 for zstd means "use default level"
/// Returns true if and only if the cache is enabled.
pub fn cache_enabled() -> bool {
    // Not everyone knows about the cache system, i.e. the tests,
    // so the default is cache disabled.
    // If `init()` was never called, this lazily installs a disabled config.
    CONFIG
        .call_once(|| Config::new_cache_disabled())
        .cache_enabled
}
/// Returns path to the cache directory.
///
/// Panics if the cache is disabled.
pub fn cache_directory() -> &'static PathBuf {
    // `r#try()` is `None` until `CONFIG` has been set; callers must have
    // gone through `init()` (or `cache_enabled()`) first.
    &CONFIG
        .r#try()
        .expect("Cache system must be initialized")
        .cache_dir
}
/// Returns cache compression level.
///
/// Panics if the cache is disabled.
pub fn compression_level() -> i32 {
    // Same initialization contract as `cache_directory()`.
    CONFIG
        .r#try()
        .expect("Cache system must be initialized")
        .compression_level
}
/// Initializes the cache system. Should be called exactly once,
/// and before using the cache system. Otherwise it can panic.
pub fn init<P: AsRef<Path>>(enabled: bool, dir: Option<P>, compression_level: Option<i32>) {
    // Atomically flip the "init was called" flag; a second call fails the
    // compare_exchange (returns Err) and panics via `expect`.
    INIT_CALLED
        .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
        .expect("Cache system init must be called at most once");
    // Also reject init-after-use (e.g. `cache_enabled()` already forced the
    // disabled default into CONFIG).
    assert!(
        CONFIG.r#try().is_none(),
        "Cache system init must be called before using the system."
    );
    let conf = CONFIG.call_once(|| {
        Config::new(
            enabled,
            dir,
            compression_level.unwrap_or(DEFAULT_COMPRESSION_LEVEL),
        )
    });
    debug!(
        "Cache init(): enabled={}, cache-dir={:?}, compression-level={}",
        conf.cache_enabled, conf.cache_dir, conf.compression_level,
    );
}
impl Config {
// Disabled-cache sentinel: empty dir, default compression, all ops no-ops.
pub fn new_cache_disabled() -> Self {
    Self {
        cache_enabled: false,
        cache_dir: PathBuf::new(),
        compression_level: DEFAULT_COMPRESSION_LEVEL,
    }
}
// Resolve the final config: explicit dir wins, otherwise fall back to the
// platform cache dir; any failure degrades to the disabled config rather
// than erroring out.
pub fn new<P: AsRef<Path>>(enabled: bool, dir: Option<P>, compression_level: i32) -> Self {
    if enabled {
        match dir {
            Some(dir) => Self::new_step2(dir.as_ref(), compression_level),
            None => match ProjectDirs::from("", "CraneStation", "wasmtime") {
                Some(proj_dirs) => {
                    Self::new_step2(proj_dirs.cache_dir(), compression_level)
                }
                None => {
                    warn!("Cache directory not specified and failed to find the default. Disabling cache.");
                    Self::new_cache_disabled()
                }
            },
        }
    } else {
        Self::new_cache_disabled()
    }
}
fn
|
(dir: &Path, compression_level: i32) -> Self {
// On Windows, if we want long paths, we need '\\?\' prefix, but it doesn't work
// with relative paths. One way to get absolute path (the only one?) is to use
// fs::canonicalize, but it requires that given path exists. The extra advantage
// of this method is fact that the method prepends '\\?\' on Windows.
match fs::create_dir_all(dir) {
Ok(()) => match fs::canonicalize(dir) {
Ok(p) => Self {
cache_enabled: true,
cache_dir: p,
compression_level,
},
Err(err) => {
warn!(
"Failed to canonicalize the cache directory. Disabling cache. \
Message: {}",
err
);
Self::new_cache_disabled()
}
},
Err(err) => {
warn!(
"Failed to create the cache directory. Disabling cache. Message: {}",
err
);
Self::new_cache_disabled()
}
}
}
}
}
lazy_static! {
    /// Modification time of the running executable as a millisecond string
    /// ("m"-prefixed when the mtime predates the Unix epoch), or "no-mtime"
    /// when it cannot be determined. Folded into cache paths for debug
    /// builds so a rebuilt compiler never reuses stale entries.
    static ref SELF_MTIME: String = {
        std::env::current_exe()
            .map_err(|_| warn!("Failed to get path of current executable"))
            .ok()
            .and_then(|path| {
                fs::metadata(&path)
                    .map_err(|_| warn!("Failed to get metadata of current executable"))
                    .ok()
            })
            .and_then(|metadata| {
                metadata
                    .modified()
                    .map_err(|_| warn!("Failed to get metadata of current executable"))
                    .ok()
            })
            // FIX: was `and_then(|mtime| Some(...))` — plain `map` says the same
            // thing (clippy::bind_instead_of_map).
            .map(|mtime| match mtime.duration_since(std::time::UNIX_EPOCH) {
                Ok(duration) => format!("{}", duration.as_millis()),
                Err(err) => format!("m{}", err.duration().as_millis()),
            })
            // FIX: `unwrap_or(String)` allocated the fallback even on success
            // (clippy::or_fun_call); the closure form is lazy.
            .unwrap_or_else(|| "no-mtime".to_string())
    };
}
/// Handle to a single module's cache slot; the path is `None` when the cache
/// system is disabled, which turns all reads/writes into no-ops.
pub struct ModuleCacheEntry {
    mod_cache_path: Option<PathBuf>,
}
// Everything produced by compiling a module, as persisted in the cache
// (serialized with bincode, compressed with zstd — see `ModuleCacheEntry`).
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct ModuleCacheData {
    compilation: Compilation,
    relocations: Relocations,
    address_transforms: ModuleAddressMap,
    value_ranges: ValueLabelsRanges,
    stack_slots: PrimaryMap<DefinedFuncIndex, ir::StackSlots>,
}
// Tuple form of `ModuleCacheData`, in field order; used at the boundary with
// the compilation pipeline (see `from_tuple` / `to_tuple`).
type ModuleCacheDataTupleType = (
    Compilation,
    Relocations,
    ModuleAddressMap,
    ValueLabelsRanges,
    PrimaryMap<DefinedFuncIndex, ir::StackSlots>,
);
// Newtype adapter driving a `Sha256` digest through `core::hash::Hasher`
// so `Module::hash_for_cache` can feed it; `finish` is unsupported by design.
struct Sha256Hasher(Sha256);
impl ModuleCacheEntry {
    /// Computes the on-disk cache path for `module`, or `None` when caching
    /// is disabled. The path is keyed on: target triple / compiler
    /// name+version (+ executable mtime for debug builds) / SHA-256 of the
    /// module contents.
    pub fn new<'data>(
        module: &Module,
        function_body_inputs: &PrimaryMap<DefinedFuncIndex, FunctionBodyData<'data>>,
        isa: &dyn isa::TargetIsa,
        compiler_name: &str,
        generate_debug_info: bool,
    ) -> Self {
        let mod_cache_path = if conf::cache_enabled() {
            let hash = Sha256Hasher::digest(module, function_body_inputs);
            // Debug builds additionally key on the executable's mtime so a
            // freshly rebuilt compiler never picks up stale entries.
            let compiler_dir = if cfg!(debug_assertions) {
                format!(
                    "{comp_name}-{comp_ver}-{comp_mtime}",
                    comp_name = compiler_name,
                    comp_ver = env!("GIT_REV"),
                    comp_mtime = *SELF_MTIME,
                )
            } else {
                format!(
                    "{comp_name}-{comp_ver}",
                    comp_name = compiler_name,
                    comp_ver = env!("GIT_REV"),
                )
            };
            // ".d" suffix separates debug-info builds from plain ones.
            let mod_filename = format!(
                "mod-{mod_hash}{mod_dbg}",
                mod_hash = base64::encode_config(&hash, base64::URL_SAFE_NO_PAD), // standard encoding uses '/' which can't be used for filename
                mod_dbg = if generate_debug_info { ".d" } else { "" },
            );
            Some(
                conf::cache_directory()
                    .join(isa.triple().to_string())
                    .join(compiler_dir)
                    .join(mod_filename),
            )
        } else {
            None
        };
        Self { mod_cache_path }
    }
    /// Loads and decodes the cached artifact; any failure (missing file,
    /// corrupt zstd or bincode payload) is logged and treated as a miss.
    pub fn get_data(&self) -> Option<ModuleCacheData> {
        let path = self.mod_cache_path.as_ref()?;
        trace!("get_data() for path: {}", path.display());
        let compressed_cache_bytes = fs::read(path).ok()?;
        let cache_bytes = zstd::decode_all(&compressed_cache_bytes[..])
            .map_err(|err| warn!("Failed to decompress cached code: {}", err))
            .ok()?;
        bincode::deserialize(&cache_bytes[..])
            .map_err(|err| warn!("Failed to deserialize cached code: {}", err))
            .ok()
    }
    /// Best-effort cache write; all errors are handled (logged) internally.
    pub fn update_data(&self, data: &ModuleCacheData) {
        let _ = self.update_data_impl(data);
    }
    // Returns `None` both on success-after-first-write and on any failure;
    // the `?` operators double as early "done" exits.
    fn update_data_impl(&self, data: &ModuleCacheData) -> Option<()> {
        let path = self.mod_cache_path.as_ref()?;
        trace!("update_data() for path: {}", path.display());
        let serialized_data = bincode::serialize(&data)
            .map_err(|err| warn!("Failed to serialize cached code: {}", err))
            .ok()?;
        let compressed_data = zstd::encode_all(&serialized_data[..], conf::compression_level())
            .map_err(|err| warn!("Failed to compress cached code: {}", err))
            .ok()?;
        // Optimize syscalls: first, try writing to disk. It should succeed in most cases.
        // Otherwise, try creating the cache directory and retry writing to the file.
        let err = fs::write(path, &compressed_data).err()?; // return on success
        debug!(
            "Attempting to create the cache directory, because \
             failed to write cached code to disk, path: {}, message: {}",
            path.display(),
            err,
        );
        let cache_dir = path.parent().unwrap();
        fs::create_dir_all(cache_dir)
            .map_err(|err| {
                warn!(
                    "Failed to create cache directory, path: {}, message: {}",
                    cache_dir.display(),
                    err
                )
            })
            .ok()?;
        // Second (and final) write attempt, after the directory exists.
        let err = fs::write(path, &compressed_data).err()?;
        warn!(
            "Failed to write cached code to disk, path: {}, message: {}",
            path.display(),
            err
        );
        // Remove any partially written file so `get_data()` never sees a
        // truncated entry.
        fs::remove_file(path)
            .map_err(|err| {
                if err.kind() != io::ErrorKind::NotFound {
                    warn!(
                        "Failed to cleanup invalid cache, path: {}, message: {}",
                        path.display(),
                        err
                    );
                }
            })
            .ok()
    }
}
impl ModuleCacheData {
    /// Builds the cache payload from the tuple the compilation produces.
    pub fn from_tuple(data: ModuleCacheDataTupleType) -> Self {
        let (compilation, relocations, address_transforms, value_ranges, stack_slots) = data;
        Self {
            compilation,
            relocations,
            address_transforms,
            value_ranges,
            stack_slots,
        }
    }
    /// Decomposes the payload back into the tuple the rest of the
    /// compilation pipeline consumes.
    pub fn to_tuple(self) -> ModuleCacheDataTupleType {
        let Self {
            compilation,
            relocations,
            address_transforms,
            value_ranges,
            stack_slots,
        } = self;
        (
            compilation,
            relocations,
            address_transforms,
            value_ranges,
            stack_slots,
        )
    }
}
impl Sha256Hasher {
    /// Hashes the module's cache-relevant contents (via
    /// `Module::hash_for_cache`) and returns the 32-byte SHA-256 digest.
    pub fn digest<'data>(
        module: &Module,
        function_body_inputs: &PrimaryMap<DefinedFuncIndex, FunctionBodyData<'data>>,
    ) -> [u8; 32] {
        let mut hasher = Self(Sha256::new());
        module.hash_for_cache(function_body_inputs, &mut hasher);
        hasher.0.result().into()
    }
}
impl Hasher for Sha256Hasher {
    // `finish` would truncate the digest to 64 bits; callers must use
    // `Sha256Hasher::digest` instead, so this is deliberately a panic.
    fn finish(&self) -> u64 {
        panic!("Sha256Hasher doesn't support finish!");
    }
    fn write(&mut self, bytes: &[u8]) {
        self.0.input(bytes);
    }
}
#[cfg(test)]
mod tests;
|
new_step2
|
identifier_name
|
cache.rs
|
use crate::address_map::{ModuleAddressMap, ValueLabelsRanges};
use crate::compilation::{Compilation, Relocations};
use crate::module::Module;
use crate::module_environ::FunctionBodyData;
use core::hash::Hasher;
use cranelift_codegen::{ir, isa};
use cranelift_entity::PrimaryMap;
use cranelift_wasm::DefinedFuncIndex;
use lazy_static::lazy_static;
use log::{debug, trace, warn};
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use std::fs;
use std::io;
use std::path::PathBuf;
use std::string::{String, ToString};
/// Module for configuring the cache system.
/// Module for configuring the cache system.
pub mod conf {
    use directories::ProjectDirs;
    use log::{debug, warn};
    use spin::Once;
    use std::fs;
    use std::path::{Path, PathBuf};
    use std::sync::atomic::{AtomicBool, Ordering};
    // Cache settings resolved once; read through the free functions below.
    struct Config {
        pub cache_enabled: bool,    // false => lookups and writes are no-ops
        pub cache_dir: PathBuf,     // canonicalized root (empty when disabled)
        pub compression_level: i32, // zstd level; 0 means "zstd default"
    }
    // Private static, so only internal function can access it.
    static CONFIG: Once<Config> = Once::new();
    // One-shot guard so a second `init()` call panics.
    static INIT_CALLED: AtomicBool = AtomicBool::new(false);
    static DEFAULT_COMPRESSION_LEVEL: i32 = 0; // 0 for zstd means "use default level"
    /// Returns true if and only if the cache is enabled.
    pub fn cache_enabled() -> bool {
        // Not everyone knows about the cache system, i.e. the tests,
        // so the default is cache disabled.
        // Lazily installs the disabled config when `init()` was never called.
        CONFIG
            .call_once(|| Config::new_cache_disabled())
            .cache_enabled
    }
    /// Returns path to the cache directory.
    ///
    /// Panics if the cache is disabled.
    pub fn cache_directory() -> &'static PathBuf {
        &CONFIG
            .r#try()
            .expect("Cache system must be initialized")
            .cache_dir
    }
    /// Returns cache compression level.
    ///
    /// Panics if the cache is disabled.
    pub fn compression_level() -> i32 {
        CONFIG
            .r#try()
            .expect("Cache system must be initialized")
            .compression_level
    }
    /// Initializes the cache system. Should be called exactly once,
    /// and before using the cache system. Otherwise it can panic.
    pub fn init<P: AsRef<Path>>(enabled: bool, dir: Option<P>, compression_level: Option<i32>) {
        // A second call fails the compare_exchange and panics via `expect`.
        INIT_CALLED
            .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
            .expect("Cache system init must be called at most once")
            ;
        assert!(
            CONFIG.r#try().is_none(),
            "Cache system init must be called before using the system."
        );
        let conf = CONFIG.call_once(|| {
            Config::new(
                enabled,
                dir,
                compression_level.unwrap_or(DEFAULT_COMPRESSION_LEVEL),
            )
        });
        debug!(
            "Cache init(): enabled={}, cache-dir={:?}, compression-level={}",
            conf.cache_enabled, conf.cache_dir, conf.compression_level,
        );
    }
    impl Config {
        // Disabled-cache sentinel: every cache operation becomes a no-op.
        pub fn new_cache_disabled() -> Self {
            Self {
                cache_enabled: false,
                cache_dir: PathBuf::new(),
                compression_level: DEFAULT_COMPRESSION_LEVEL,
            }
        }
        // Explicit dir wins; otherwise the platform cache dir. Any failure
        // degrades to the disabled config instead of erroring.
        pub fn new<P: AsRef<Path>>(enabled: bool, dir: Option<P>, compression_level: i32) -> Self {
            if enabled {
                match dir {
                    Some(dir) => Self::new_step2(dir.as_ref(), compression_level),
                    None => match ProjectDirs::from("", "CraneStation", "wasmtime") {
                        Some(proj_dirs) => {
                            Self::new_step2(proj_dirs.cache_dir(), compression_level)
                        }
                        None => {
                            warn!("Cache directory not specified and failed to find the default. Disabling cache.");
                            Self::new_cache_disabled()
                        }
                    },
                }
            } else {
                Self::new_cache_disabled()
            }
        }
        fn new_step2(dir: &Path, compression_level: i32) -> Self {
            // On Windows, if we want long paths, we need '\\?\' prefix, but it doesn't work
            // with relative paths. One way to get absolute path (the only one?) is to use
            // fs::canonicalize, but it requires that given path exists. The extra advantage
            // of this method is fact that the method prepends '\\?\' on Windows.
            match fs::create_dir_all(dir) {
                Ok(()) => match fs::canonicalize(dir) {
                    Ok(p) => Self {
                        cache_enabled: true,
                        cache_dir: p,
                        compression_level,
                    },
                    Err(err) => {
                        warn!(
                            "Failed to canonicalize the cache directory. Disabling cache. \
                             Message: {}",
                            err
                        );
                        Self::new_cache_disabled()
                    }
                },
                Err(err) => {
                    warn!(
                        "Failed to create the cache directory. Disabling cache. Message: {}",
                        err
                    );
                    Self::new_cache_disabled()
                }
            }
        }
    }
}
lazy_static! {
    /// Modification time of the running executable as a millisecond string
    /// ("m"-prefixed when the mtime predates the Unix epoch), or "no-mtime"
    /// when it cannot be determined. Folded into cache paths for debug
    /// builds so a rebuilt compiler never reuses stale entries.
    static ref SELF_MTIME: String = {
        std::env::current_exe()
            .map_err(|_| warn!("Failed to get path of current executable"))
            .ok()
            .and_then(|path| {
                fs::metadata(&path)
                    .map_err(|_| warn!("Failed to get metadata of current executable"))
                    .ok()
            })
            .and_then(|metadata| {
                metadata
                    .modified()
                    .map_err(|_| warn!("Failed to get metadata of current executable"))
                    .ok()
            })
            // FIX: was `and_then(|mtime| Some(...))` — plain `map` says the same
            // thing (clippy::bind_instead_of_map).
            .map(|mtime| match mtime.duration_since(std::time::UNIX_EPOCH) {
                Ok(duration) => format!("{}", duration.as_millis()),
                Err(err) => format!("m{}", err.duration().as_millis()),
            })
            // FIX: `unwrap_or(String)` allocated the fallback even on success
            // (clippy::or_fun_call); the closure form is lazy.
            .unwrap_or_else(|| "no-mtime".to_string())
    };
}
/// Handle to a single module's cache slot; the path is `None` when the cache
/// system is disabled, which turns all reads/writes into no-ops.
pub struct ModuleCacheEntry {
    mod_cache_path: Option<PathBuf>,
}
// Everything produced by compiling a module, as persisted in the cache
// (serialized with bincode, compressed with zstd — see `ModuleCacheEntry`).
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct ModuleCacheData {
    compilation: Compilation,
    relocations: Relocations,
    address_transforms: ModuleAddressMap,
    value_ranges: ValueLabelsRanges,
    stack_slots: PrimaryMap<DefinedFuncIndex, ir::StackSlots>,
}
// Tuple form of `ModuleCacheData`, in field order; used at the boundary with
// the compilation pipeline (see `from_tuple` / `to_tuple`).
type ModuleCacheDataTupleType = (
    Compilation,
    Relocations,
    ModuleAddressMap,
    ValueLabelsRanges,
    PrimaryMap<DefinedFuncIndex, ir::StackSlots>,
);
// Newtype adapter driving a `Sha256` digest through `core::hash::Hasher`
// so `Module::hash_for_cache` can feed it; `finish` is unsupported by design.
struct Sha256Hasher(Sha256);
impl ModuleCacheEntry {
pub fn new<'data>(
module: &Module,
function_body_inputs: &PrimaryMap<DefinedFuncIndex, FunctionBodyData<'data>>,
isa: &dyn isa::TargetIsa,
compiler_name: &str,
generate_debug_info: bool,
) -> Self
|
mod_dbg = if generate_debug_info { ".d" } else { "" },
);
Some(
conf::cache_directory()
.join(isa.triple().to_string())
.join(compiler_dir)
.join(mod_filename),
)
} else {
None
};
Self { mod_cache_path }
}
/// Loads and decodes the cached artifact; any failure (missing file,
/// corrupt zstd or bincode payload) is logged and treated as a cache miss.
pub fn get_data(&self) -> Option<ModuleCacheData> {
    let path = self.mod_cache_path.as_ref()?;
    trace!("get_data() for path: {}", path.display());
    let compressed_cache_bytes = fs::read(path).ok()?;
    let cache_bytes = zstd::decode_all(&compressed_cache_bytes[..])
        .map_err(|err| warn!("Failed to decompress cached code: {}", err))
        .ok()?;
    bincode::deserialize(&cache_bytes[..])
        .map_err(|err| warn!("Failed to deserialize cached code: {}", err))
        .ok()
}
/// Best-effort cache write; all errors are handled (logged) internally.
pub fn update_data(&self, data: &ModuleCacheData) {
    let _ = self.update_data_impl(data);
}
fn update_data_impl(&self, data: &ModuleCacheData) -> Option<()> {
let path = self.mod_cache_path.as_ref()?;
trace!("update_data() for path: {}", path.display());
let serialized_data = bincode::serialize(&data)
.map_err(|err| warn!("Failed to serialize cached code: {}", err))
.ok()?;
let compressed_data = zstd::encode_all(&serialized_data[..], conf::compression_level())
.map_err(|err| warn!("Failed to compress cached code: {}", err))
.ok()?;
// Optimize syscalls: first, try writing to disk. It should succeed in most cases.
// Otherwise, try creating the cache directory and retry writing to the file.
let err = fs::write(path, &compressed_data).err()?; // return on success
debug!(
"Attempting to create the cache directory, because \
failed to write cached code to disk, path: {}, message: {}",
path.display(),
err,
);
let cache_dir = path.parent().unwrap();
fs::create_dir_all(cache_dir)
.map_err(|err| {
warn!(
"Failed to create cache directory, path: {}, message: {}",
cache_dir.display(),
err
)
})
.ok()?;
let err = fs::write(path, &compressed_data).err()?;
warn!(
"Failed to write cached code to disk, path: {}, message: {}",
path.display(),
err
);
fs::remove_file(path)
.map_err(|err| {
if err.kind()!= io::ErrorKind::NotFound {
warn!(
"Failed to cleanup invalid cache, path: {}, message: {}",
path.display(),
err
);
}
})
.ok()
}
}
impl ModuleCacheData {
pub fn from_tuple(data: ModuleCacheDataTupleType) -> Self {
Self {
compilation: data.0,
relocations: data.1,
address_transforms: data.2,
value_ranges: data.3,
stack_slots: data.4,
}
}
pub fn to_tuple(self) -> ModuleCacheDataTupleType {
(
self.compilation,
self.relocations,
self.address_transforms,
self.value_ranges,
self.stack_slots,
)
}
}
impl Sha256Hasher {
pub fn digest<'data>(
module: &Module,
function_body_inputs: &PrimaryMap<DefinedFuncIndex, FunctionBodyData<'data>>,
) -> [u8; 32] {
let mut hasher = Self(Sha256::new());
module.hash_for_cache(function_body_inputs, &mut hasher);
hasher.0.result().into()
}
}
impl Hasher for Sha256Hasher {
fn finish(&self) -> u64 {
panic!("Sha256Hasher doesn't support finish!");
}
fn write(&mut self, bytes: &[u8]) {
self.0.input(bytes);
}
}
#[cfg(test)]
mod tests;
|
{
let mod_cache_path = if conf::cache_enabled() {
let hash = Sha256Hasher::digest(module, function_body_inputs);
let compiler_dir = if cfg!(debug_assertions) {
format!(
"{comp_name}-{comp_ver}-{comp_mtime}",
comp_name = compiler_name,
comp_ver = env!("GIT_REV"),
comp_mtime = *SELF_MTIME,
)
} else {
format!(
"{comp_name}-{comp_ver}",
comp_name = compiler_name,
comp_ver = env!("GIT_REV"),
)
};
let mod_filename = format!(
"mod-{mod_hash}{mod_dbg}",
mod_hash = base64::encode_config(&hash, base64::URL_SAFE_NO_PAD), // standard encoding uses '/' which can't be used for filename
|
identifier_body
|
cache.rs
|
use crate::address_map::{ModuleAddressMap, ValueLabelsRanges};
use crate::compilation::{Compilation, Relocations};
use crate::module::Module;
use crate::module_environ::FunctionBodyData;
use core::hash::Hasher;
use cranelift_codegen::{ir, isa};
use cranelift_entity::PrimaryMap;
use cranelift_wasm::DefinedFuncIndex;
use lazy_static::lazy_static;
use log::{debug, trace, warn};
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use std::fs;
use std::io;
use std::path::PathBuf;
use std::string::{String, ToString};
/// Module for configuring the cache system.
pub mod conf {
use directories::ProjectDirs;
use log::{debug, warn};
use spin::Once;
use std::fs;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering};
|
}
// Private static, so only internal function can access it.
static CONFIG: Once<Config> = Once::new();
static INIT_CALLED: AtomicBool = AtomicBool::new(false);
static DEFAULT_COMPRESSION_LEVEL: i32 = 0; // 0 for zstd means "use default level"
/// Returns true if and only if the cache is enabled.
pub fn cache_enabled() -> bool {
    // Not everyone knows about the cache system, i.e. the tests,
    // so the default is cache disabled.
    // If init() already ran, call_once returns the stored config;
    // otherwise it lazily stores (and pins) a disabled config.
    CONFIG
        .call_once(Config::new_cache_disabled) // fn reference, no redundant closure
        .cache_enabled
}
/// Returns path to the cache directory.
///
/// Panics if the cache is disabled.
pub fn cache_directory() -> &'static PathBuf {
    // `r#try()` yields None until init() (or a lazy default) has populated
    // CONFIG; callers must run conf::init() first or this panics.
    &CONFIG
        .r#try()
        .expect("Cache system must be initialized")
        .cache_dir
}
/// Returns cache compression level.
///
/// Panics if the cache is disabled.
pub fn compression_level() -> i32 {
    // Panics (via expect) when the cache system was never initialized;
    // mirrors the contract of cache_directory() above.
    CONFIG
        .r#try()
        .expect("Cache system must be initialized")
        .compression_level
}
/// Initializes the cache system. Should be called exactly once,
/// and before using the cache system. Otherwise it can panic.
pub fn init<P: AsRef<Path>>(enabled: bool, dir: Option<P>, compression_level: Option<i32>) {
    // Atomically flip INIT_CALLED false -> true; a second call fails the
    // compare_exchange (returns Err) and panics via the expect below.
    INIT_CALLED
        .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
        .expect("Cache system init must be called at most once");
    // Guard against cache_enabled() having already lazily stored the
    // disabled default before init() ran — the Once can only be set once.
    assert!(
        CONFIG.r#try().is_none(),
        "Cache system init must be called before using the system."
    );
    let conf = CONFIG.call_once(|| {
        Config::new(
            enabled,
            dir,
            compression_level.unwrap_or(DEFAULT_COMPRESSION_LEVEL),
        )
    });
    debug!(
        "Cache init(): enabled={}, cache-dir={:?}, compression-level={}",
        conf.cache_enabled, conf.cache_dir, conf.compression_level,
    );
}
impl Config {
    /// Returns a config representing a disabled cache; used as the
    /// fallback whenever initialization cannot complete.
    pub fn new_cache_disabled() -> Self {
        Self {
            cache_enabled: false,
            cache_dir: PathBuf::new(),
            compression_level: DEFAULT_COMPRESSION_LEVEL,
        }
    }
    /// Builds a config. When `dir` is `None`, falls back to the platform
    /// project cache directory; when that also fails, disables the cache
    /// with a warning instead of erroring out.
    pub fn new<P: AsRef<Path>>(enabled: bool, dir: Option<P>, compression_level: i32) -> Self {
        if enabled {
            match dir {
                Some(dir) => Self::new_step2(dir.as_ref(), compression_level),
                None => match ProjectDirs::from("", "CraneStation", "wasmtime") {
                    Some(proj_dirs) => {
                        Self::new_step2(proj_dirs.cache_dir(), compression_level)
                    }
                    None => {
                        warn!("Cache directory not specified and failed to find the default. Disabling cache.");
                        Self::new_cache_disabled()
                    }
                },
            }
        } else {
            Self::new_cache_disabled()
        }
    }
    /// Creates the cache directory and canonicalizes its path. Any failure
    /// (create or canonicalize) disables the cache rather than failing.
    fn new_step2(dir: &Path, compression_level: i32) -> Self {
        // On Windows, if we want long paths, we need '\\?\' prefix, but it doesn't work
        // with relative paths. One way to get absolute path (the only one?) is to use
        // fs::canonicalize, but it requires that given path exists. The extra advantage
        // of this method is fact that the method prepends '\\?\' on Windows.
        match fs::create_dir_all(dir) {
            Ok(()) => match fs::canonicalize(dir) {
                Ok(p) => Self {
                    cache_enabled: true,
                    cache_dir: p,
                    compression_level,
                },
                Err(err) => {
                    warn!(
                        "Failed to canonicalize the cache directory. Disabling cache. \
                         Message: {}",
                        err
                    );
                    Self::new_cache_disabled()
                }
            },
            Err(err) => {
                warn!(
                    "Failed to create the cache directory. Disabling cache. Message: {}",
                    err
                );
                Self::new_cache_disabled()
            }
        }
    }
}
}
lazy_static! {
    /// Modification time (ms since epoch) of the running executable, used to
    /// key debug-build cache directories so artifacts from a rebuilt
    /// compiler are not reused. Falls back to "no-mtime" on any failure.
    static ref SELF_MTIME: String = {
        std::env::current_exe()
            .map_err(|_| warn!("Failed to get path of current executable"))
            .ok()
            .and_then(|path| {
                fs::metadata(&path)
                    .map_err(|_| warn!("Failed to get metadata of current executable"))
                    .ok()
            })
            .and_then(|metadata| {
                metadata
                    .modified()
                    .map_err(|_| warn!("Failed to get mtime of current executable"))
                    .ok()
            })
            // `map` instead of `and_then(|x| Some(..))` (clippy: bind_instead_of_map)
            .map(|mtime| match mtime.duration_since(std::time::UNIX_EPOCH) {
                Ok(duration) => format!("{}", duration.as_millis()),
                // Pre-epoch mtime: prefix with 'm' so the value stays unambiguous.
                Err(err) => format!("m{}", err.duration().as_millis()),
            })
            // Lazy default: avoid allocating the String eagerly (clippy: or_fun_call)
            .unwrap_or_else(|| "no-mtime".to_string())
    };
}
/// Handle to a per-module cache file; `None` path means caching is disabled.
pub struct ModuleCacheEntry {
    mod_cache_path: Option<PathBuf>,
}
/// The compilation artifacts persisted to (and restored from) the cache.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct ModuleCacheData {
    compilation: Compilation,
    relocations: Relocations,
    address_transforms: ModuleAddressMap,
    value_ranges: ValueLabelsRanges,
    stack_slots: PrimaryMap<DefinedFuncIndex, ir::StackSlots>,
}
// Tuple form of ModuleCacheData; element order must stay in sync with
// from_tuple/to_tuple.
type ModuleCacheDataTupleType = (
    Compilation,
    Relocations,
    ModuleAddressMap,
    ValueLabelsRanges,
    PrimaryMap<DefinedFuncIndex, ir::StackSlots>,
);
// Newtype adapting SHA-256 to the `Hasher` trait so module contents can be
// fed through the standard hashing machinery.
struct Sha256Hasher(Sha256);
impl ModuleCacheEntry {
    /// Computes the cache-file path for `module` from a hash of its function
    /// bodies plus the compiler identity and debug flag; the path is `None`
    /// when the cache is disabled.
    pub fn new<'data>(
        module: &Module,
        function_body_inputs: &PrimaryMap<DefinedFuncIndex, FunctionBodyData<'data>>,
        isa: &dyn isa::TargetIsa,
        compiler_name: &str,
        generate_debug_info: bool,
    ) -> Self {
        let mod_cache_path = if conf::cache_enabled() {
            let hash = Sha256Hasher::digest(module, function_body_inputs);
            // Debug builds additionally key on the executable's mtime so a
            // rebuilt compiler never reuses stale artifacts.
            let compiler_dir = if cfg!(debug_assertions) {
                format!(
                    "{comp_name}-{comp_ver}-{comp_mtime}",
                    comp_name = compiler_name,
                    comp_ver = env!("GIT_REV"),
                    comp_mtime = *SELF_MTIME,
                )
            } else {
                format!(
                    "{comp_name}-{comp_ver}",
                    comp_name = compiler_name,
                    comp_ver = env!("GIT_REV"),
                )
            };
            let mod_filename = format!(
                "mod-{mod_hash}{mod_dbg}",
                mod_hash = base64::encode_config(&hash, base64::URL_SAFE_NO_PAD), // standard encoding uses '/' which can't be used for filename
                mod_dbg = if generate_debug_info { ".d" } else { "" },
            );
            Some(
                conf::cache_directory()
                    .join(isa.triple().to_string())
                    .join(compiler_dir)
                    .join(mod_filename),
            )
        } else {
            None
        };
        Self { mod_cache_path }
    }
    /// Loads and deserializes the cached artifacts; returns `None` on any
    /// failure (cache disabled, missing file, decompression or
    /// deserialization error), logging a warning where useful.
    pub fn get_data(&self) -> Option<ModuleCacheData> {
        let path = self.mod_cache_path.as_ref()?;
        trace!("get_data() for path: {}", path.display());
        let compressed_cache_bytes = fs::read(path).ok()?;
        let cache_bytes = zstd::decode_all(&compressed_cache_bytes[..])
            .map_err(|err| warn!("Failed to decompress cached code: {}", err))
            .ok()?;
        bincode::deserialize(&cache_bytes[..])
            .map_err(|err| warn!("Failed to deserialize cached code: {}", err))
            .ok()
    }
    /// Best-effort write of `data` to the cache; errors are logged inside
    /// the impl and otherwise swallowed.
    pub fn update_data(&self, data: &ModuleCacheData) {
        let _ = self.update_data_impl(data);
    }
    // Returns Option<()> purely so `?` can be used for early exit; the
    // value itself carries no meaning.
    fn update_data_impl(&self, data: &ModuleCacheData) -> Option<()> {
        let path = self.mod_cache_path.as_ref()?;
        trace!("update_data() for path: {}", path.display());
        let serialized_data = bincode::serialize(&data)
            .map_err(|err| warn!("Failed to serialize cached code: {}", err))
            .ok()?;
        let compressed_data = zstd::encode_all(&serialized_data[..], conf::compression_level())
            .map_err(|err| warn!("Failed to compress cached code: {}", err))
            .ok()?;
        // Optimize syscalls: first, try writing to disk. It should succeed in most cases.
        // Otherwise, try creating the cache directory and retry writing to the file.
        let err = fs::write(path, &compressed_data).err()?; // return on success
        debug!(
            "Attempting to create the cache directory, because \
             failed to write cached code to disk, path: {}, message: {}",
            path.display(),
            err,
        );
        let cache_dir = path.parent().unwrap();
        fs::create_dir_all(cache_dir)
            .map_err(|err| {
                warn!(
                    "Failed to create cache directory, path: {}, message: {}",
                    cache_dir.display(),
                    err
                )
            })
            .ok()?;
        // Retry once after creating the directory; bail out (logging) if it
        // still fails.
        let err = fs::write(path, &compressed_data).err()?;
        warn!(
            "Failed to write cached code to disk, path: {}, message: {}",
            path.display(),
            err
        );
        // Remove any partially-written file so a truncated cache entry is
        // never read back later.
        fs::remove_file(path)
            .map_err(|err| {
                if err.kind() != io::ErrorKind::NotFound {
                    warn!(
                        "Failed to cleanup invalid cache, path: {}, message: {}",
                        path.display(),
                        err
                    );
                }
            })
            .ok()
    }
}
impl ModuleCacheData {
    /// Builds a `ModuleCacheData` from its tuple form; tuple order must
    /// match the field order declared on the struct.
    pub fn from_tuple(data: ModuleCacheDataTupleType) -> Self {
        let (compilation, relocations, address_transforms, value_ranges, stack_slots) = data;
        Self {
            compilation,
            relocations,
            address_transforms,
            value_ranges,
            stack_slots,
        }
    }
    /// Consumes `self` and returns the same artifacts in tuple form.
    pub fn to_tuple(self) -> ModuleCacheDataTupleType {
        let Self {
            compilation,
            relocations,
            address_transforms,
            value_ranges,
            stack_slots,
        } = self;
        (compilation, relocations, address_transforms, value_ranges, stack_slots)
    }
}
impl Sha256Hasher {
    /// Hashes the cache-relevant parts of `module` and its function bodies,
    /// returning the raw 32-byte SHA-256 digest.
    pub fn digest<'data>(
        module: &Module,
        function_body_inputs: &PrimaryMap<DefinedFuncIndex, FunctionBodyData<'data>>,
    ) -> [u8; 32] {
        let mut hasher = Self(Sha256::new());
        module.hash_for_cache(function_body_inputs, &mut hasher);
        hasher.0.result().into()
    }
}
impl Hasher for Sha256Hasher {
    // This type only funnels bytes into SHA-256 via the `Hash` machinery;
    // producing a 64-bit hash is deliberately unsupported.
    fn finish(&self) -> u64 {
        panic!("Sha256Hasher doesn't support finish!");
    }
    fn write(&mut self, bytes: &[u8]) {
        self.0.input(bytes);
    }
}
#[cfg(test)]
mod tests;
|
struct Config {
pub cache_enabled: bool,
pub cache_dir: PathBuf,
pub compression_level: i32,
|
random_line_split
|
main.rs
|
// vim:set et sw=4 ts=4 foldmethod=marker:
#![warn(clippy::all, clippy::pedantic)]
#![warn(missing_docs)]
#![recursion_limit="512"]
// starting doc {{{
//! ARES: Automatic REcord System.
//!
//! A Kubernetes-native system to automatically create and manage DNS records
//! meant to run in parallel with External DNS.
//!
//! Configuration is managed through the ares-secret Secret, typically in the
//! default namespace. This may change in the future to default to the
//! namespace that ARES is deployed in.
//!
//! ## Configuration
//!
//! A configuration file should look like this:
//!
//! ```yaml
//! - selector:
//! - syntixi.io
//! provider: cloudflare
//! providerOptions:
//! apiToken: ***
//! ```
//!
//! The corresponding Secret can look like:
//!
//! ```yaml
//! apiVersion: v1
//! kind: Secret
//! metadata:
//! name: ares-secret
//! stringData:
//! ares.yaml: |-
//! - selector:
//! - syntixi.io
//! provider: cloudflare
//! providerOptions:
//! apiToken: ***
//! ```
//!
//! If you want to control multiple domain zones across multiple different
//! providers, you can add another element into the default array and
//! configure another provider there. You can configure multiple domain zones
//! through a single provider.
//!
//! ## Custom Resource Definitions
//!
//! ARES watches over the syntixi.io/v1alpha1/Record CustomResourceDefinition
//! to know which domain names to add, remove, or modify. An example resource
//! is below.
//!
//! ```yaml
//! apiVersion: syntixi.io/v1alpha1
//! kind: Record
//! metadata:
//! name: example
//! spec:
//! fqdn: example.syntixi.io
//! ttl: 100
//! type: CNAME
//! value:
//! - syntixi.io
//! ```
//!
//! For addresses that can change, such as Nodes that Pods may be running on,
//! it is recommended to instead use a valueFrom selector, such as the
//! PodSelector. The example below includes a Pod and a Record that points to
//! the Node the Pod is running on, with a Selector similar to that in the
//! Kubernetes
//! [documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/).
//!
//! This should not be used for inbound traffic (for that, you should use a
//! LoadBalancer Service or an Ingress record, with external-dns). This is,
//! however, useful for making SPF records point to an outbound mail record,
//! where the mail can be sent from one of many Nodes.
//!
//! ```yaml
//! apiVersion: v1
//! kind: Pod
//! metadata:
//! name: nginx-hello-world
//! app: nginx
//! spec:
//! containers:
//! - name: nginx
//! image: nginxdemos/hello
//! ---
//! apiVersion: syntixi.io/v1alpha1
//! kind: Record
//! metadata:
//! name: example-selector
//! spec:
//! fqdn: selector.syntixi.io
//! ttl: 1
//! valueFrom:
//! podSelector:
//! matchLabels:
//! app: nginx
//! ```
//!
//! When a syntixi.io/v1alpha1/Record resource is created, an additional record
//! is made for ARES to track ownership over the DNS record. So long as that
//! tracking record exists, when the Kubernetes resource is deleted, the
//! corresponding record and tracking record will be deleted.
// }}}
// imports {{{
use clap::Clap;
use std::ops::Deref;
use std::sync::Arc;
use slog::{
crit, debug, error, info, log, o,
Drain, Logger,
};
use anyhow::{anyhow, Result};
use futures::{
StreamExt, TryStreamExt, select,
future::{Future, Fuse, join_all},
};
use k8s_openapi::api::core::v1::{Event, Secret};
use kube::{
api::{Api, ListParams, Meta, WatchEvent},
Client,
};
use kube_runtime::{utils::try_flatten_applied, watcher};
use kube_derive::{CustomResource};
mod cli;
mod xpathable;
mod providers;
mod program_config;
mod record_spec;
use program_config::AresConfig;
use providers::{ProviderConfig, util::{ProviderBackend, ZoneDomainName,
RecordType, Record as RecordObject}};
use record_spec::{Record, RecordValueCollector};
// }}}
#[tokio::main]
async fn
|
() -> Result<()> {
let opts: cli::Opts = cli::Opts::parse();
let decorator = slog_term::TermDecorator::new().build();
let drain = slog_term::FullFormat::new(decorator).build().fuse();
let drain = slog_async::Async::new(drain).build().fuse();
let root_logger = slog::Logger::root(
drain,
o!("secret" => opts.secret.clone(),
"secret_key" => opts.secret_key.clone(),
"secret_namespace" => opts.secret_namespace.clone()),
);
let client = Client::try_default().await?;
info!(root_logger, "Loading configuration from Secret");
let secrets: Api<Secret> = Api::namespaced(client, opts.secret_namespace.as_str());
let secret = secrets.get(opts.secret.as_str()).await?;
let config_data = secret
.data
.as_ref()
.ok_or(anyhow!("Unable to get data from Secret"))?;
let config_content = config_data
.get(opts.secret_key.as_str())
.ok_or(anyhow!("Unable to get key from Secret"))?
.clone().0;
debug!(root_logger, "Configuration loaded from Secret");
let config: Vec<Arc<AresConfig>> =
serde_yaml::from_str::<Vec<_>>(std::str::from_utf8(&config_content[..])?)?
.into_iter()
.map(Arc::new)
.collect();
let records: Api<Record> = Api::all(Client::try_default().await?);
let record_list: Vec<Arc<Record>> = records.list(&ListParams::default()).await?
.items
.into_iter()
.map(Arc::new)
.collect();
let mut handles = vec![];
// TODO watch over config and reload when changes are made
for ares in config.into_iter() {
// Find all matching Records and put a ref of them into a Vec
let allowed_records: Vec<Arc<Record>> = record_list
.iter()
.filter(|record| ares.matches_selector(record.spec.fqdn.as_str()))
.map(|x| x.clone()) // clone() of Arc<> is intentional
.collect();
// TODO put a watcher over records instead of just getting them at program start
for mut record in allowed_records {
// Generate a proxy logger to be cloned so we can build upon it every loop
let proxy_logger = root_logger.new(o!());
let sub_ac = ares.clone(); // clone of Arc<> is intentional
handles.push(tokio::spawn(async move {
loop {
let sub_logger = proxy_logger.new(o!("record" => record.spec.fqdn.clone()));
if let Some(collector_obj) = &record.spec.value_from {
let collector = collector_obj.deref();
info!(sub_logger, "Getting zone domain name");
let zone = match sub_ac.provider.get_zone(&record.spec.fqdn).await {
Ok(z) => z,
Err(e) => {
crit!(sub_logger, "Error! {}", e);
break
}
};
let mut builder = RecordObject::builder(record.spec.fqdn.clone(),
zone, RecordType::A);
// Syncing should happen regardless of using a watcher to ensure that any
// extra records are deleted.
info!(sub_logger, "Syncing");
let sync_state = collector.sync(&record.metadata, &sub_ac.provider,
&mut builder).await;
if let Err(e) = sync_state {
crit!(sub_logger, "Error! {}", e);
break
}
info!(sub_logger, "Finished syncing");
info!(sub_logger, "Spawning watcher");
let res = collector.watch_values(&record.metadata, &sub_ac.provider,
&mut builder).await;
info!(sub_logger, "Stopped watching");
// Set a new record if the watcher stops; this could be the result of a
// timeout or a change in the Record value, which may need a refresh.
record = match res {
Ok(r) => Arc::new(r),
Err(e) => {
crit!(sub_logger, "Error! {}", e);
break
}
}
}
}
}));
}
}
let secret_logger = root_logger.new(o!());
handles.push(tokio::spawn(async move {
loop {
info!(secret_logger, "Watching over Secrets to detect configuration changes");
let mut secret_watcher = secrets
.watch(&ListParams::default(), "0")
.await
.unwrap()
.boxed();
while let Ok(Some(secret_status)) = secret_watcher.try_next().await {
// If the configuration changes, trigger a panic which will cause a restart.
match secret_status {
WatchEvent::Modified(modified) => {
if modified.metadata.uid == secret.metadata.uid {
info!(secret_logger, "Found config change, terminating");
std::process::exit(0);
}
},
WatchEvent::Deleted(deleted) => {
if deleted.metadata.uid == secret.metadata.uid {
info!(secret_logger, "Found config change, terminating");
std::process::exit(0);
}
},
_ => {},
}
}
info!(secret_logger, "Restarting Secret watcher");
}
}));
join_all(handles).await;
Ok(())
}
|
main
|
identifier_name
|
main.rs
|
// vim:set et sw=4 ts=4 foldmethod=marker:
#![warn(clippy::all, clippy::pedantic)]
#![warn(missing_docs)]
#![recursion_limit="512"]
// starting doc {{{
//! ARES: Automatic REcord System.
//!
//! A Kubernetes-native system to automatically create and manage DNS records
//! meant to run in parallel with External DNS.
//!
//! Configuration is managed through the ares-secret Secret, typically in the
//! default namespace. This may change in the future to default to the
//! namespace that ARES is deployed in.
//!
//! ## Configuration
//!
//! A configuration file should look like this:
//!
//! ```yaml
//! - selector:
//! - syntixi.io
//! provider: cloudflare
//! providerOptions:
//! apiToken: ***
//! ```
//!
//! The corresponding Secret can look like:
//!
//! ```yaml
//! apiVersion: v1
//! kind: Secret
//! metadata:
//! name: ares-secret
//! stringData:
//! ares.yaml: |-
//! - selector:
//! - syntixi.io
//! provider: cloudflare
//! providerOptions:
//! apiToken: ***
//! ```
//!
//! If you want to control multiple domain zones across multiple different
//! providers, you can add another element into the default array and
//! configure another provider there. You can configure multiple domain zones
//! through a single provider.
//!
//! ## Custom Resource Definitions
//!
//! ARES watches over the syntixi.io/v1alpha1/Record CustomResourceDefinition
//! to know which domain names to add, remove, or modify. An example resource
//! is below.
//!
//! ```yaml
//! apiVersion: syntixi.io/v1alpha1
//! kind: Record
//! metadata:
//! name: example
//! spec:
//! fqdn: example.syntixi.io
//! ttl: 100
//! type: CNAME
//! value:
//! - syntixi.io
//! ```
//!
//! For addresses that can change, such as Nodes that Pods may be running on,
//! it is recommended to instead use a valueFrom selector, such as the
//! PodSelector. The example below includes a Pod and a Record that points to
//! the Node the Pod is running on, with a Selector similar to that in the
//! Kubernetes
//! [documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/).
//!
//! This should not be used for inbound traffic (for that, you should use a
//! LoadBalancer Service or an Ingress record, with external-dns). This is,
//! however, useful for making SPF records point to an outbound mail record,
//! where the mail can be sent from one of many Nodes.
//!
//! ```yaml
//! apiVersion: v1
//! kind: Pod
//! metadata:
//! name: nginx-hello-world
//! app: nginx
//! spec:
//! containers:
//! - name: nginx
//! image: nginxdemos/hello
//! ---
//! apiVersion: syntixi.io/v1alpha1
//! kind: Record
//! metadata:
//! name: example-selector
//! spec:
//! fqdn: selector.syntixi.io
//! ttl: 1
//! valueFrom:
//! podSelector:
//! matchLabels:
//! app: nginx
//! ```
//!
//! When a syntixi.io/v1alpha1/Record resource is created, an additional record
//! is made for ARES to track ownership over the DNS record. So long as that
//! tracking record exists, when the Kubernetes resource is deleted, the
//! corresponding record and tracking record will be deleted.
// }}}
// imports {{{
use clap::Clap;
use std::ops::Deref;
use std::sync::Arc;
use slog::{
crit, debug, error, info, log, o,
Drain, Logger,
};
use anyhow::{anyhow, Result};
use futures::{
StreamExt, TryStreamExt, select,
future::{Future, Fuse, join_all},
};
use k8s_openapi::api::core::v1::{Event, Secret};
use kube::{
api::{Api, ListParams, Meta, WatchEvent},
Client,
};
use kube_runtime::{utils::try_flatten_applied, watcher};
use kube_derive::{CustomResource};
mod cli;
mod xpathable;
mod providers;
mod program_config;
mod record_spec;
use program_config::AresConfig;
use providers::{ProviderConfig, util::{ProviderBackend, ZoneDomainName,
RecordType, Record as RecordObject}};
use record_spec::{Record, RecordValueCollector};
// }}}
#[tokio::main]
async fn main() -> Result<()> {
let opts: cli::Opts = cli::Opts::parse();
let decorator = slog_term::TermDecorator::new().build();
let drain = slog_term::FullFormat::new(decorator).build().fuse();
let drain = slog_async::Async::new(drain).build().fuse();
let root_logger = slog::Logger::root(
drain,
o!("secret" => opts.secret.clone(),
"secret_key" => opts.secret_key.clone(),
"secret_namespace" => opts.secret_namespace.clone()),
);
let client = Client::try_default().await?;
info!(root_logger, "Loading configuration from Secret");
let secrets: Api<Secret> = Api::namespaced(client, opts.secret_namespace.as_str());
let secret = secrets.get(opts.secret.as_str()).await?;
let config_data = secret
.data
.as_ref()
.ok_or(anyhow!("Unable to get data from Secret"))?;
let config_content = config_data
.get(opts.secret_key.as_str())
.ok_or(anyhow!("Unable to get key from Secret"))?
.clone().0;
debug!(root_logger, "Configuration loaded from Secret");
let config: Vec<Arc<AresConfig>> =
serde_yaml::from_str::<Vec<_>>(std::str::from_utf8(&config_content[..])?)?
.into_iter()
.map(Arc::new)
.collect();
let records: Api<Record> = Api::all(Client::try_default().await?);
let record_list: Vec<Arc<Record>> = records.list(&ListParams::default()).await?
.items
.into_iter()
.map(Arc::new)
.collect();
let mut handles = vec![];
// TODO watch over config and reload when changes are made
for ares in config.into_iter() {
// Find all matching Records and put a ref of them into a Vec
let allowed_records: Vec<Arc<Record>> = record_list
.iter()
.filter(|record| ares.matches_selector(record.spec.fqdn.as_str()))
.map(|x| x.clone()) // clone() of Arc<> is intentional
.collect();
// TODO put a watcher over records instead of just getting them at program start
for mut record in allowed_records {
// Generate a proxy logger to be cloned so we can build upon it every loop
let proxy_logger = root_logger.new(o!());
let sub_ac = ares.clone(); // clone of Arc<> is intentional
handles.push(tokio::spawn(async move {
loop {
let sub_logger = proxy_logger.new(o!("record" => record.spec.fqdn.clone()));
if let Some(collector_obj) = &record.spec.value_from {
let collector = collector_obj.deref();
info!(sub_logger, "Getting zone domain name");
let zone = match sub_ac.provider.get_zone(&record.spec.fqdn).await {
Ok(z) => z,
Err(e) => {
crit!(sub_logger, "Error! {}", e);
break
}
};
let mut builder = RecordObject::builder(record.spec.fqdn.clone(),
zone, RecordType::A);
// Syncing should happen regardless of using a watcher to ensure that any
// extra records are deleted.
info!(sub_logger, "Syncing");
let sync_state = collector.sync(&record.metadata, &sub_ac.provider,
&mut builder).await;
if let Err(e) = sync_state
|
info!(sub_logger, "Finished syncing");
info!(sub_logger, "Spawning watcher");
let res = collector.watch_values(&record.metadata, &sub_ac.provider,
&mut builder).await;
info!(sub_logger, "Stopped watching");
// Set a new record if the watcher stops; this could be the result of a
// timeout or a change in the Record value, which may need a refresh.
record = match res {
Ok(r) => Arc::new(r),
Err(e) => {
crit!(sub_logger, "Error! {}", e);
break
}
}
}
}
}));
}
}
let secret_logger = root_logger.new(o!());
handles.push(tokio::spawn(async move {
loop {
info!(secret_logger, "Watching over Secrets to detect configuration changes");
let mut secret_watcher = secrets
.watch(&ListParams::default(), "0")
.await
.unwrap()
.boxed();
while let Ok(Some(secret_status)) = secret_watcher.try_next().await {
// If the configuration changes, trigger a panic which will cause a restart.
match secret_status {
WatchEvent::Modified(modified) => {
if modified.metadata.uid == secret.metadata.uid {
info!(secret_logger, "Found config change, terminating");
std::process::exit(0);
}
},
WatchEvent::Deleted(deleted) => {
if deleted.metadata.uid == secret.metadata.uid {
info!(secret_logger, "Found config change, terminating");
std::process::exit(0);
}
},
_ => {},
}
}
info!(secret_logger, "Restarting Secret watcher");
}
}));
join_all(handles).await;
Ok(())
}
|
{
crit!(sub_logger, "Error! {}", e);
break
}
|
conditional_block
|
main.rs
|
// vim:set et sw=4 ts=4 foldmethod=marker:
#![warn(clippy::all, clippy::pedantic)]
#![warn(missing_docs)]
#![recursion_limit="512"]
// starting doc {{{
//! ARES: Automatic REcord System.
//!
//! A Kubernetes-native system to automatically create and manage DNS records
//! meant to run in parallel with External DNS.
//!
//! Configuration is managed through the ares-secret Secret, typically in the
//! default namespace. This may change in the future to default to the
//! namespace that ARES is deployed in.
//!
//! ## Configuration
//!
//! A configuration file should look like this:
//!
//! ```yaml
//! - selector:
//! - syntixi.io
//! provider: cloudflare
//! providerOptions:
//! apiToken: ***
//! ```
//!
//! The corresponding Secret can look like:
//!
//! ```yaml
//! apiVersion: v1
//! kind: Secret
//! metadata:
//! name: ares-secret
//! stringData:
//! ares.yaml: |-
//! - selector:
//! - syntixi.io
//! provider: cloudflare
//! providerOptions:
//! apiToken: ***
//! ```
//!
//! If you want to control multiple domain zones across multiple different
//! providers, you can add another element into the default array and
//! configure another provider there. You can configure multiple domain zones
//! through a single provider.
//!
//! ## Custom Resource Definitions
//!
//! ARES watches over the syntixi.io/v1alpha1/Record CustomResourceDefinition
//! to know which domain names to add, remove, or modify. An example resource
//! is below.
//!
//! ```yaml
//! apiVersion: syntixi.io/v1alpha1
//! kind: Record
//! metadata:
//! name: example
//! spec:
//! fqdn: example.syntixi.io
//! ttl: 100
//! type: CNAME
//! value:
//! - syntixi.io
//! ```
//!
//! For addresses that can change, such as Nodes that Pods may be running on,
//! it is recommended to instead use a valueFrom selector, such as the
//! PodSelector. The example below includes a Pod and a Record that points to
//! the Node the Pod is running on, with a Selector similar to that in the
//! Kubernetes
//! [documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/).
//!
//! This should not be used for inbound traffic (for that, you should use a
//! LoadBalancer Service or an Ingress record, with external-dns). This is,
|
//! ```yaml
//! apiVersion: v1
//! kind: Pod
//! metadata:
//! name: nginx-hello-world
//! app: nginx
//! spec:
//! containers:
//! - name: nginx
//! image: nginxdemos/hello
//! ---
//! apiVersion: syntixi.io/v1alpha1
//! kind: Record
//! metadata:
//! name: example-selector
//! spec:
//! fqdn: selector.syntixi.io
//! ttl: 1
//! valueFrom:
//! podSelector:
//! matchLabels:
//! app: nginx
//! ```
//!
//! When a syntixi.io/v1alpha1/Record resource is created, an additional record
//! is made for ARES to track ownership over the DNS record. So long as that
//! tracking record exists, when the Kubernetes resource is deleted, the
//! corresponding record and tracking record will be deleted.
// }}}
// imports {{{
use clap::Clap;
use std::ops::Deref;
use std::sync::Arc;
use slog::{
crit, debug, error, info, log, o,
Drain, Logger,
};
use anyhow::{anyhow, Result};
use futures::{
StreamExt, TryStreamExt, select,
future::{Future, Fuse, join_all},
};
use k8s_openapi::api::core::v1::{Event, Secret};
use kube::{
api::{Api, ListParams, Meta, WatchEvent},
Client,
};
use kube_runtime::{utils::try_flatten_applied, watcher};
use kube_derive::{CustomResource};
mod cli;
mod xpathable;
mod providers;
mod program_config;
mod record_spec;
use program_config::AresConfig;
use providers::{ProviderConfig, util::{ProviderBackend, ZoneDomainName,
RecordType, Record as RecordObject}};
use record_spec::{Record, RecordValueCollector};
// }}}
/// Program entry point.
///
/// Startup sequence (all visible below):
/// 1. Parse CLI options and build a terminal `slog` logger tagged with the
///    Secret coordinates (name, key, namespace).
/// 2. Load the YAML configuration (a `Vec<AresConfig>`) from one key of a
///    Kubernetes Secret.
/// 3. List all `Record` resources once and, for every config entry, spawn one
///    tokio task per matching Record that syncs and then watches its values.
/// 4. Spawn one task that watches the config Secret and exits the process on
///    modification/deletion so the config is reloaded on restart.
///    NOTE(review): relies on an external restart-on-exit policy — confirm.
#[tokio::main]
async fn main() -> Result<()> {
    let opts: cli::Opts = cli::Opts::parse();
    // Terminal logger: full-format drain, made thread-safe via the async drain.
    let decorator = slog_term::TermDecorator::new().build();
    let drain = slog_term::FullFormat::new(decorator).build().fuse();
    let drain = slog_async::Async::new(drain).build().fuse();
    // Every log line carries the Secret coordinates for traceability.
    let root_logger = slog::Logger::root(
        drain,
        o!("secret" => opts.secret.clone(),
           "secret_key" => opts.secret_key.clone(),
           "secret_namespace" => opts.secret_namespace.clone()),
    );
    let client = Client::try_default().await?;
    info!(root_logger, "Loading configuration from Secret");
    let secrets: Api<Secret> = Api::namespaced(client, opts.secret_namespace.as_str());
    let secret = secrets.get(opts.secret.as_str()).await?;
    // Secret `data` is optional in the API; missing data or key is fatal.
    // NOTE(review): `ok_or(anyhow!(...))` builds the error eagerly even on
    // success; `ok_or_else` would be the lazy form.
    let config_data = secret
        .data
        .as_ref()
        .ok_or(anyhow!("Unable to get data from Secret"))?;
    // `.0` unwraps the k8s ByteString newtype around the raw bytes.
    let config_content = config_data
        .get(opts.secret_key.as_str())
        .ok_or(anyhow!("Unable to get key from Secret"))?
        .clone().0;
    debug!(root_logger, "Configuration loaded from Secret");
    // Each AresConfig is wrapped in Arc so it can be shared with the
    // per-record tasks spawned below.
    let config: Vec<Arc<AresConfig>> =
        serde_yaml::from_str::<Vec<_>>(std::str::from_utf8(&config_content[..])?)?
            .into_iter()
            .map(Arc::new)
            .collect();
    // NOTE(review): creates a second client here; the earlier `client` was
    // moved into `Api::namespaced` — consider cloning one client instead.
    let records: Api<Record> = Api::all(Client::try_default().await?);
    // One-shot snapshot of all Records (see TODOs below about watching).
    let record_list: Vec<Arc<Record>> = records.list(&ListParams::default()).await?
        .items
        .into_iter()
        .map(Arc::new)
        .collect();
    let mut handles = vec![];
    // TODO watch over config and reload when changes are made
    for ares in config.into_iter() {
        // Find all matching Records and put a ref of them into a Vec
        let allowed_records: Vec<Arc<Record>> = record_list
            .iter()
            .filter(|record| ares.matches_selector(record.spec.fqdn.as_str()))
            .map(|x| x.clone()) // clone() of Arc<> is intentional
            .collect();
        // TODO put a watcher over records instead of just getting them at program start
        for mut record in allowed_records {
            // Generate a proxy logger to be cloned so we can build upon it every loop
            let proxy_logger = root_logger.new(o!());
            let sub_ac = ares.clone(); // clone of Arc<> is intentional
            // One long-lived task per (config, record) pair: sync, then watch,
            // then loop; any provider error breaks the loop and ends the task.
            handles.push(tokio::spawn(async move {
                loop {
                    let sub_logger = proxy_logger.new(o!("record" => record.spec.fqdn.clone()));
                    // Only Records using `valueFrom` (dynamic values) are
                    // handled by this loop; others fall through and the task
                    // spins — NOTE(review): confirm static-value Records are
                    // handled elsewhere.
                    if let Some(collector_obj) = &record.spec.value_from {
                        let collector = collector_obj.deref();
                        info!(sub_logger, "Getting zone domain name");
                        let zone = match sub_ac.provider.get_zone(&record.spec.fqdn).await {
                            Ok(z) => z,
                            Err(e) => {
                                crit!(sub_logger, "Error! {}", e);
                                break
                            }
                        };
                        let mut builder = RecordObject::builder(record.spec.fqdn.clone(),
                                                                zone, RecordType::A);
                        // Syncing should happen regardless of using a watcher to ensure that any
                        // extra records are deleted.
                        info!(sub_logger, "Syncing");
                        let sync_state = collector.sync(&record.metadata, &sub_ac.provider,
                                                        &mut builder).await;
                        if let Err(e) = sync_state {
                            crit!(sub_logger, "Error! {}", e);
                            break
                        }
                        info!(sub_logger, "Finished syncing");
                        info!(sub_logger, "Spawning watcher");
                        // Blocks until the watcher stops (timeout or change).
                        let res = collector.watch_values(&record.metadata, &sub_ac.provider,
                                                         &mut builder).await;
                        info!(sub_logger, "Stopped watching");
                        // Set a new record if the watcher stops; this could be the result of a
                        // timeout or a change in the Record value, which may need a refresh.
                        record = match res {
                            Ok(r) => Arc::new(r),
                            Err(e) => {
                                crit!(sub_logger, "Error! {}", e);
                                break
                            }
                        }
                    }
                }
            }));
        }
    }
    let secret_logger = root_logger.new(o!());
    // Config-change watchdog: exit(0) on any change to "our" Secret so a
    // fresh process reloads the configuration.
    handles.push(tokio::spawn(async move {
        loop {
            info!(secret_logger, "Watching over Secrets to detect configuration changes");
            // resourceVersion "0" starts the watch from the current state.
            let mut secret_watcher = secrets
                .watch(&ListParams::default(), "0")
                .await
                .unwrap()
                .boxed();
            while let Ok(Some(secret_status)) = secret_watcher.try_next().await {
                // If the configuration changes, trigger a panic which will cause a restart.
                match secret_status {
                    WatchEvent::Modified(modified) => {
                        // UID comparison ensures we react only to the Secret
                        // we loaded config from, not any other in the namespace.
                        if modified.metadata.uid == secret.metadata.uid {
                            info!(secret_logger, "Found config change, terminating");
                            std::process::exit(0);
                        }
                    },
                    WatchEvent::Deleted(deleted) => {
                        if deleted.metadata.uid == secret.metadata.uid {
                            info!(secret_logger, "Found config change, terminating");
                            std::process::exit(0);
                        }
                    },
                    _ => {},
                }
            }
            // Watch streams end periodically; loop to re-establish.
            info!(secret_logger, "Restarting Secret watcher");
        }
    }));
    // Wait for all spawned tasks; in practice the process runs until the
    // watchdog calls exit(0) or every task has broken out of its loop.
    join_all(handles).await;
    Ok(())
}
|
//! however, useful for making SPF records point to an outbound mail record,
//! where the mail can be sent from one of many Nodes.
//!
|
random_line_split
|
main.rs
|
// vim:set et sw=4 ts=4 foldmethod=marker:
#![warn(clippy::all, clippy::pedantic)]
#![warn(missing_docs)]
#![recursion_limit="512"]
// starting doc {{{
//! ARES: Automatic REcord System.
//!
//! A Kubernetes-native system to automatically create and manage DNS records
//! meant to run in parallel with External DNS.
//!
//! Configuration is managed through the ares-secret Secret, typically in the
//! default namespace. This may change in the future to default to the
//! namespace that ARES is deployed in.
//!
//! ## Configuration
//!
//! A configuration file should look like this:
//!
//! ```yaml
//! - selector:
//! - syntixi.io
//! provider: cloudflare
//! providerOptions:
//! apiToken: ***
//! ```
//!
//! The corresponding Secret can look like:
//!
//! ```yaml
//! apiVersion: v1
//! kind: Secret
//! metadata:
//! name: ares-secret
//! stringData:
//! ares.yaml: |-
//! - selector:
//! - syntixi.io
//! provider: cloudflare
//! providerOptions:
//! apiToken: ***
//! ```
//!
//! If you want to control multiple domain zones across multiple different
//! providers, you can add another element into the default array and
//! configure another provider there. You can configure multiple domain zones
//! through a single provider.
//!
//! ## Custom Resource Definitions
//!
//! ARES watches over the syntixi.io/v1alpha1/Record CustomResourceDefinition
//! to know which domain names to add, remove, or modify. An example resource
//! is below.
//!
//! ```yaml
//! apiVersion: syntixi.io/v1alpha1
//! kind: Record
//! metadata:
//! name: example
//! spec:
//! fqdn: example.syntixi.io
//! ttl: 100
//! type: CNAME
//! value:
//! - syntixi.io
//! ```
//!
//! For addresses that can change, such as Nodes that Pods may be running on,
//! it is recommended to instead use a valueFrom selector, such as the
//! PodSelector. The example below includes a Pod and a Record that points to
//! the Node the Pod is running on, with a Selector similar to that in the
//! Kubernetes
//! [documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/).
//!
//! This should not be used for inbound traffic (for that, you should use a
//! LoadBalancer Service or an Ingress record, with external-dns). This is,
//! however, useful for making SPF records point to an outbound mail record,
//! where the mail can be sent from one of many Nodes.
//!
//! ```yaml
//! apiVersion: v1
//! kind: Pod
//! metadata:
//! name: nginx-hello-world
//! app: nginx
//! spec:
//! containers:
//! - name: nginx
//! image: nginxdemos/hello
//! ---
//! apiVersion: syntixi.io/v1alpha1
//! kind: Record
//! metadata:
//! name: example-selector
//! spec:
//! fqdn: selector.syntixi.io
//! ttl: 1
//! valueFrom:
//! podSelector:
//! matchLabels:
//! app: nginx
//! ```
//!
//! When a syntixi.io/v1alpha1/Record resource is created, an additional record
//! is made for ARES to track ownership over the DNS record. So long as that
//! tracking record exists, when the Kubernetes resource is deleted, the
//! corresponding record and tracking record will be deleted.
// }}}
// imports {{{
use clap::Clap;
use std::ops::Deref;
use std::sync::Arc;
use slog::{
crit, debug, error, info, log, o,
Drain, Logger,
};
use anyhow::{anyhow, Result};
use futures::{
StreamExt, TryStreamExt, select,
future::{Future, Fuse, join_all},
};
use k8s_openapi::api::core::v1::{Event, Secret};
use kube::{
api::{Api, ListParams, Meta, WatchEvent},
Client,
};
use kube_runtime::{utils::try_flatten_applied, watcher};
use kube_derive::{CustomResource};
mod cli;
mod xpathable;
mod providers;
mod program_config;
mod record_spec;
use program_config::AresConfig;
use providers::{ProviderConfig, util::{ProviderBackend, ZoneDomainName,
RecordType, Record as RecordObject}};
use record_spec::{Record, RecordValueCollector};
// }}}
#[tokio::main]
async fn main() -> Result<()>
|
let config_content = config_data
.get(opts.secret_key.as_str())
.ok_or(anyhow!("Unable to get key from Secret"))?
.clone().0;
debug!(root_logger, "Configuration loaded from Secret");
let config: Vec<Arc<AresConfig>> =
serde_yaml::from_str::<Vec<_>>(std::str::from_utf8(&config_content[..])?)?
.into_iter()
.map(Arc::new)
.collect();
let records: Api<Record> = Api::all(Client::try_default().await?);
let record_list: Vec<Arc<Record>> = records.list(&ListParams::default()).await?
.items
.into_iter()
.map(Arc::new)
.collect();
let mut handles = vec![];
// TODO watch over config and reload when changes are made
for ares in config.into_iter() {
// Find all matching Records and put a ref of them into a Vec
let allowed_records: Vec<Arc<Record>> = record_list
.iter()
.filter(|record| ares.matches_selector(record.spec.fqdn.as_str()))
.map(|x| x.clone()) // clone() of Arc<> is intentional
.collect();
// TODO put a watcher over records instead of just getting them at program start
for mut record in allowed_records {
// Generate a proxy logger to be cloned so we can build upon it every loop
let proxy_logger = root_logger.new(o!());
let sub_ac = ares.clone(); // clone of Arc<> is intentional
handles.push(tokio::spawn(async move {
loop {
let sub_logger = proxy_logger.new(o!("record" => record.spec.fqdn.clone()));
if let Some(collector_obj) = &record.spec.value_from {
let collector = collector_obj.deref();
info!(sub_logger, "Getting zone domain name");
let zone = match sub_ac.provider.get_zone(&record.spec.fqdn).await {
Ok(z) => z,
Err(e) => {
crit!(sub_logger, "Error! {}", e);
break
}
};
let mut builder = RecordObject::builder(record.spec.fqdn.clone(),
zone, RecordType::A);
// Syncing should happen regardless of using a watcher to ensure that any
// extra records are deleted.
info!(sub_logger, "Syncing");
let sync_state = collector.sync(&record.metadata, &sub_ac.provider,
&mut builder).await;
if let Err(e) = sync_state {
crit!(sub_logger, "Error! {}", e);
break
}
info!(sub_logger, "Finished syncing");
info!(sub_logger, "Spawning watcher");
let res = collector.watch_values(&record.metadata, &sub_ac.provider,
&mut builder).await;
info!(sub_logger, "Stopped watching");
// Set a new record if the watcher stops; this could be the result of a
// timeout or a change in the Record value, which may need a refresh.
record = match res {
Ok(r) => Arc::new(r),
Err(e) => {
crit!(sub_logger, "Error! {}", e);
break
}
}
}
}
}));
}
}
let secret_logger = root_logger.new(o!());
handles.push(tokio::spawn(async move {
loop {
info!(secret_logger, "Watching over Secrets to detect configuration changes");
let mut secret_watcher = secrets
.watch(&ListParams::default(), "0")
.await
.unwrap()
.boxed();
while let Ok(Some(secret_status)) = secret_watcher.try_next().await {
// If the configuration changes, trigger a panic which will cause a restart.
match secret_status {
WatchEvent::Modified(modified) => {
if modified.metadata.uid == secret.metadata.uid {
info!(secret_logger, "Found config change, terminating");
std::process::exit(0);
}
},
WatchEvent::Deleted(deleted) => {
if deleted.metadata.uid == secret.metadata.uid {
info!(secret_logger, "Found config change, terminating");
std::process::exit(0);
}
},
_ => {},
}
}
info!(secret_logger, "Restarting Secret watcher");
}
}));
join_all(handles).await;
Ok(())
}
|
{
let opts: cli::Opts = cli::Opts::parse();
let decorator = slog_term::TermDecorator::new().build();
let drain = slog_term::FullFormat::new(decorator).build().fuse();
let drain = slog_async::Async::new(drain).build().fuse();
let root_logger = slog::Logger::root(
drain,
o!("secret" => opts.secret.clone(),
"secret_key" => opts.secret_key.clone(),
"secret_namespace" => opts.secret_namespace.clone()),
);
let client = Client::try_default().await?;
info!(root_logger, "Loading configuration from Secret");
let secrets: Api<Secret> = Api::namespaced(client, opts.secret_namespace.as_str());
let secret = secrets.get(opts.secret.as_str()).await?;
let config_data = secret
.data
.as_ref()
.ok_or(anyhow!("Unable to get data from Secret"))?;
|
identifier_body
|
core.rs
|
// Copyright (c) 2016-2021 Fabian Schuiki
use crate::lexer::token::*;
use crate::parser::rules::{Parser, Recovered, RecoveredResult, Reported, ReportedResult};
use moore_common::errors::*;
use moore_common::name::*;
use moore_common::source::*;
use std;
use std::fmt::Display;
use std::marker::PhantomData;
/// A predicate that matches on the current position of a token stream.
///
/// Implementors must also be `Display` so that parse-error diagnostics can
/// name the thing that was expected (see `separated`).
pub trait Predicate<P: Parser>: Display {
    /// Match the predicate against the current position of the parser.
    /// Must not consume any tokens.
    fn matches(&mut self, _: &mut P) -> bool;
    /// Skip tokens in the parser until the predicate matches. Optionally also
    /// consume the tokens that make up the predicate.
    fn recover(&mut self, _: &mut P, consume: bool);
}
impl<P> Predicate<P> for Token
where
    P: Parser,
{
    /// A bare token matches exactly when it is the next token in the stream.
    fn matches(&mut self, parser: &mut P) -> bool {
        *self == parser.peek(0).value
    }

    /// Recovery for a single token delegates to the free `recover` function
    /// with this token as the sole terminator.
    fn recover(&mut self, parser: &mut P, consume: bool) {
        let terminators = [*self];
        recover(parser, &terminators, consume)
    }
}
/// A function predicate.
///
/// Packages a match closure and a recovery closure together with a static
/// description used for `Display` (diagnostics).
struct FuncPredicate<P: Parser, M: FnMut(&mut P) -> bool, R: FnMut(&mut P, bool)> {
    // Closure called by `Predicate::matches`.
    match_func: M,
    // Closure called by `Predicate::recover`.
    recover_func: R,
    // Human-readable description, printed verbatim by the `Display` impl.
    desc: &'static str,
    // Ties the otherwise-unused parser type `P` to this struct.
    _marker: PhantomData<P>,
}
impl<P, M, R> Predicate<P> for FuncPredicate<P, M, R>
where
    P: Parser,
    M: FnMut(&mut P) -> bool,
    R: FnMut(&mut P, bool),
{
    /// Delegates matching to the stored match closure.
    fn matches(&mut self, parser: &mut P) -> bool {
        let matcher = &mut self.match_func;
        matcher(parser)
    }

    /// Delegates recovery to the stored recovery closure.
    fn recover(&mut self, parser: &mut P, consume: bool) {
        let recoverer = &mut self.recover_func;
        recoverer(parser, consume)
    }
}
impl<P, M, R> Display for FuncPredicate<P, M, R>
where
    P: Parser,
    M: FnMut(&mut P) -> bool,
    R: FnMut(&mut P, bool),
{
    /// The human-readable form is the static description supplied at
    /// construction time.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.write_str(self.desc)
    }
}
/// A predicate that extends an inner predicate with one additional token:
/// it matches when either the inner predicate or the extra token matches.
/// Built up recursively by the `token_predicate!` macro.
pub struct TokenPredicate<P: Parser, T: Predicate<P>> {
    // The wrapped predicate checked first.
    inner: T,
    // The additional token this predicate also accepts.
    token: Token,
    // Ties the otherwise-unused parser type `P` to this struct.
    _marker: PhantomData<P>,
}
impl<P, T> TokenPredicate<P, T>
where
    P: Parser,
    T: Predicate<P>,
{
    /// Create a new token predicate that accepts `token` in addition to
    /// whatever `inner` accepts.
    pub fn new(inner: T, token: Token) -> TokenPredicate<P, T> {
        TokenPredicate {
            // Field-init shorthand (was `inner: inner, token: token`).
            inner,
            token,
            _marker: PhantomData,
        }
    }
}
impl<P, T> Predicate<P> for TokenPredicate<P, T>
where
    P: Parser,
    T: Predicate<P>,
{
    /// Matches when the inner predicate matches, or — failing that — when the
    /// next token equals the extra token. The inner check runs first, exactly
    /// as in the short-circuiting `||` form.
    fn matches(&mut self, parser: &mut P) -> bool {
        if self.inner.matches(parser) {
            return true;
        }
        parser.peek(0).value == self.token
    }

    /// Recovery is delegated entirely to the inner predicate.
    fn recover(&mut self, parser: &mut P, consume: bool) {
        self.inner.recover(parser, consume)
    }
}
impl<P, T> Display for TokenPredicate<P, T>
where
    P: Parser,
    T: Predicate<P>,
{
    /// Lists the inner predicate first, then the extra token, separated by
    /// a comma — same output as the original single `write!`.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "{}", self.inner)?;
        write!(f, ", {}", self.token)
    }
}
// Build a `Predicate` from one or more tokens: a single token is used as-is
// (Token itself implements Predicate); two or more are folded right-to-left
// into nested `TokenPredicate`s, e.g. `token_predicate!(a, b, c)` expands to
// `TokenPredicate::new(TokenPredicate::new(c, b), a)`.
macro_rules! token_predicate {
    ($token:expr) => ($token);
    ($token1:expr, $token2:expr $(, $tokens:expr)*) => (
        TokenPredicate::new(token_predicate!($token2 $(, $tokens)*), $token1)
    );
}
/// Skip tokens until one of the `term` tokens appears at delimiter-nesting
/// depth zero, optionally consuming it (`eat_term`).
///
/// Open/close delimiters seen along the way are tracked on a stack so that a
/// terminator inside nested delimiters is ignored. A closing delimiter that
/// mismatches the top of the stack, or that appears with an empty stack,
/// emits a fatal diagnostic and aborts the scan; `Eof` always stops it.
pub fn recover<P: Parser>(p: &mut P, term: &[Token], eat_term: bool) {
    let mut stack = Vec::new();
    loop {
        let Spanned {
            value: tkn,
            span: sp,
        } = p.peek(0);
        // Terminators only count when we are not inside nested delimiters.
        if stack.is_empty() {
            for t in term {
                if *t == tkn {
                    if eat_term {
                        p.skip();
                    }
                    return;
                }
            }
        }
        // p.emit(
        //     DiagBuilder2::note("Skipped during recovery")
        //         .span(sp)
        // );
        match tkn {
            OpenDelim(x) => stack.push(x),
            CloseDelim(x) => {
                if let Some(open) = stack.pop() {
                    // Closing delimiter must pair with the most recent opener.
                    if open!= x {
                        p.emit(
                            DiagBuilder2::fatal(format!(
                                "Found closing {} which is not the complement to the previous \
                                 opening {}",
                                CloseDelim(x),
                                OpenDelim(open)
                            ))
                            .span(sp),
                        );
                        break;
                    }
                } else {
                    // Closing delimiter with no matching opener at all.
                    p.emit(
                        DiagBuilder2::fatal(format!(
                            "Found closing {} without an earlier opening {}",
                            CloseDelim(x),
                            OpenDelim(x)
                        ))
                        .span(sp),
                    );
                    break;
                }
            }
            Eof => break,
            _ => (),
        }
        p.skip();
    }
}
/// Apply a parser and if it fails, recover to one of a list of tokens. This
/// turns reported into recovered errors.
pub fn recovered<P: Parser, R, F>(
    p: &mut P,
    term: &[Token],
    eat_term: bool,
    mut parse: F,
) -> RecoveredResult<R>
where
    F: FnMut(&mut P) -> Result<R, Reported>,
{
    // On failure, skip ahead to a terminator, then report the error to the
    // caller as already-recovered.
    parse(p).map_err(|Reported| {
        recover(p, term, eat_term);
        Recovered
    })
}
/// Consume a token if it is present, do nothing otherwise. Returns whether
/// the token was consumed.
pub fn accept<P: Parser>(p: &mut P, expect: Token) -> bool {
    let present = p.peek(0).value == expect;
    if present {
        p.bump();
    }
    present
}
/// Consume a specific token, or emit an error if the next token in the stream
/// does not match the expected one.
pub fn require<P: Parser>(p: &mut P, expect: Token) -> ReportedResult<()> {
let Spanned {
value: actual,
span,
} = p.peek(0);
if actual == expect {
p.bump();
Ok(())
} else {
p.emit(
DiagBuilder2::error(format!("Expected {}, but found {} instead", expect, actual))
.span(span),
);
Err(Reported)
}
}
/// Repeatedly apply a parser until it returns `None` or the end of the input
/// is reached. Errors from the parser are propagated immediately.
pub fn repeat<P: Parser, R, F, E>(p: &mut P, mut parse: F) -> Result<Vec<R>, E>
where
    F: FnMut(&mut P) -> Result<Option<R>, E>,
{
    let mut items = Vec::new();
    loop {
        if p.peek(0).value == Eof {
            break;
        }
        match parse(p)? {
            Some(item) => items.push(item),
            None => break,
        }
    }
    Ok(items)
}
/// Repeatedly apply a parser until a certain predicate matches (or `Eof` is
/// reached). On a parse error, recovers to the terminator predicate without
/// consuming it and returns `Recovered`.
pub fn repeat_until<P: Parser, R, F, T>(
    p: &mut P,
    mut term: T,
    mut parse: F,
) -> Result<Vec<R>, Recovered>
where
    F: FnMut(&mut P) -> Result<R, Reported>,
    T: Predicate<P>,
{
    let mut items = Vec::new();
    loop {
        // Check order matters: Eof is tested before the (possibly stateful)
        // terminator predicate, mirroring the original `while` condition.
        if p.peek(0).value == Eof || term.matches(p) {
            break;
        }
        match parse(p) {
            Ok(item) => items.push(item),
            Err(_) => {
                term.recover(p, false);
                return Err(Recovered);
            }
        }
    }
    Ok(items)
}
/// Parse a list of items separated with a specific token, until a terminator
/// token has been reached. The terminator is not consumed.
///
/// A trailing separator immediately before the terminator is tolerated with a
/// warning. Any parse error, or an unexpected token where a separator or the
/// terminator was expected, recovers to the terminator (without consuming it)
/// and returns `Recovered`.
pub fn separated<P: Parser, M, R, F, T>(
    p: &mut P,
    sep: Token,
    mut term: T,
    msg: &M,
    mut parse: F,
) -> RecoveredResult<Vec<R>>
where
    F: FnMut(&mut P) -> ReportedResult<R>,
    T: Predicate<P>,
    M: Display +?Sized,
{
    let mut v = Vec::new();
    // Also stop as soon as the parser has hit a fatal diagnostic.
    while!p.is_fatal() && p.peek(0).value!= Eof &&!term.matches(p) {
        // Parse the item. If the parser fails, recover to the terminator and
        // abort.
        match parse(p) {
            Ok(x) => v.push(x),
            Err(_) => {
                term.recover(p, false);
                return Err(Recovered);
            }
        }
        // Try to match the terminator. If it does not, consume a separator and
        // catch the case where the separator is immediately followed by the
        // terminator (superfluous separator).
        if term.matches(p) {
            break;
        } else if accept(p, sep) {
            if term.matches(p) {
                let q = p.last_span();
                p.emit(DiagBuilder2::warning(format!("Superfluous trailing {}", sep)).span(q));
                break;
            }
        } else {
            // Neither terminator nor separator follows the item: diagnose and
            // recover to the terminator.
            let Spanned { value: tkn, span } = p.peek(0);
            p.emit(
                DiagBuilder2::error(format!(
                    "Expected {} or {} after {}, but found {} instead",
                    term, sep, msg, tkn
                ))
                .span(span),
            );
            term.recover(p, false);
            return Err(Recovered);
        }
    }
    Ok(v)
}
/// Parse a non-empty list. See `separated` for details.
pub fn separated_nonempty<P: Parser, M, R, F, T>(
p: &mut P,
sep: Token,
term: T,
msg: &M,
parse: F,
) -> RecoveredResult<Vec<R>>
where
F: FnMut(&mut P) -> ReportedResult<R>,
T: Predicate<P>,
M: Display +?Sized,
{
let q = p.peek(0).span;
let v = separated(p, sep, term, msg, parse)?;
if v.is_empty() {
p.emit(DiagBuilder2::error(format!("Expected at least one {}", msg)).span(q));
Err(Recovered)
} else {
Ok(v)
}
}
/// Parses the opening delimiter, calls the `inner` function, and parses the
/// closing delimiter. Properly recovers to and including the closing
/// delimiter if the `inner` function throws an error.
pub fn flanked<P: Parser, R, F>(p: &mut P, delim: DelimToken, mut inner: F) -> RecoveredResult<R>
where
    F: FnMut(&mut P) -> ReportedResult<R>,
{
    require(p, OpenDelim(delim)).map_err(|_| Recovered)?;
    // Run the inner parser, then insist on the closing delimiter while
    // keeping the inner result.
    let result = inner(p).and_then(|r| require(p, CloseDelim(delim)).map(|_| r));
    // Either failure (inner parse or missing close) recovers past the
    // closing delimiter — same handling as the original's two match arms.
    result.map_err(|Reported| {
        recover(p, &[CloseDelim(delim)], true);
        Recovered
    })
}
/// If the opening delimiter is present, consumes it, calls the `inner`
/// function, and parses the closing delimiter. Properly recovers to and
/// including the closing delimiter if the `inner` function throws an error.
/// If the opening delimiter is not present, returns `None`.
pub fn try_flanked<P: Parser, R, F>(
    p: &mut P,
    delim: DelimToken,
    inner: F,
) -> RecoveredResult<Option<R>>
where
    F: FnMut(&mut P) -> ReportedResult<R>,
{
    if p.peek(0).value == OpenDelim(delim) {
        // `.map(Some)` replaces the redundant closure `.map(|r| Some(r))`
        // (clippy::redundant_closure); behavior is identical.
        flanked(p, delim, inner).map(Some)
    } else {
        Ok(None)
    }
}
/// Parse an identifier.
pub fn parse_ident<P: Parser, M: Display>(p: &mut P, msg: M) -> ReportedResult<Spanned<Name>> {
let Spanned { value, span } = p.peek(0);
match value {
Ident(n) => {
p.bump();
Ok(Spanned::new(n, span))
}
wrong => {
p.emit(
DiagBuilder2::error(format!("Expected {}, but found {} instead", msg, wrong))
.span(span),
);
Err(Reported)
}
}
}
/// Try to parse an identifier.
pub fn
|
<P: Parser>(p: &mut P) -> Option<Spanned<Name>> {
let Spanned { value, span } = p.peek(0);
match value {
Ident(n) => {
p.bump();
Some(Spanned::new(n, span))
}
_ => None,
}
}
/// Determine the earliest occurring token from a set. Useful to determine
/// which of two or more tokens appears first in a stream. `Eof` always
/// terminates the scan, so the loop is guaranteed to return.
pub fn earliest<P: Parser>(p: &mut P, tokens: &[Token]) -> Spanned<Token> {
    let mut offset = 0;
    loop {
        let peeked = p.peek(offset);
        // Eof acts as an implicit member of the set, ending the search.
        if peeked.value == Eof || tokens.contains(&peeked.value) {
            return peeked;
        }
        offset += 1;
    }
}
|
try_ident
|
identifier_name
|
core.rs
|
// Copyright (c) 2016-2021 Fabian Schuiki
use crate::lexer::token::*;
use crate::parser::rules::{Parser, Recovered, RecoveredResult, Reported, ReportedResult};
use moore_common::errors::*;
use moore_common::name::*;
use moore_common::source::*;
use std;
use std::fmt::Display;
use std::marker::PhantomData;
/// A predicate that matches on the current position of a token stream.
pub trait Predicate<P: Parser>: Display {
/// Match the predicate against the current position of the parser.
fn matches(&mut self, _: &mut P) -> bool;
/// Skip tokens in the parser until the predicate matches. Optionally also
/// consume the tokens that make up the predicate.
fn recover(&mut self, _: &mut P, consume: bool);
}
impl<P> Predicate<P> for Token
where
P: Parser,
{
fn matches(&mut self, p: &mut P) -> bool {
p.peek(0).value == *self
}
fn recover(&mut self, p: &mut P, consume: bool) {
recover(p, &[*self], consume)
}
}
/// A function predicate.
struct FuncPredicate<P: Parser, M: FnMut(&mut P) -> bool, R: FnMut(&mut P, bool)> {
match_func: M,
recover_func: R,
desc: &'static str,
_marker: PhantomData<P>,
}
impl<P, M, R> Predicate<P> for FuncPredicate<P, M, R>
where
P: Parser,
M: FnMut(&mut P) -> bool,
R: FnMut(&mut P, bool),
{
fn matches(&mut self, p: &mut P) -> bool {
(self.match_func)(p)
}
fn recover(&mut self, p: &mut P, consume: bool) {
(self.recover_func)(p, consume)
}
}
impl<P, M, R> Display for FuncPredicate<P, M, R>
where
P: Parser,
M: FnMut(&mut P) -> bool,
R: FnMut(&mut P, bool),
{
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}", self.desc)
}
}
pub struct TokenPredicate<P: Parser, T: Predicate<P>> {
inner: T,
token: Token,
_marker: PhantomData<P>,
}
impl<P, T> TokenPredicate<P, T>
where
P: Parser,
T: Predicate<P>,
{
/// Create a new token predicate.
pub fn new(inner: T, token: Token) -> TokenPredicate<P, T> {
TokenPredicate {
inner: inner,
token: token,
_marker: PhantomData,
}
}
}
impl<P, T> Predicate<P> for TokenPredicate<P, T>
where
P: Parser,
T: Predicate<P>,
{
fn matches(&mut self, p: &mut P) -> bool {
self.inner.matches(p) || p.peek(0).value == self.token
}
fn recover(&mut self, p: &mut P, consume: bool) {
self.inner.recover(p, consume)
}
}
impl<P, T> Display for TokenPredicate<P, T>
where
P: Parser,
T: Predicate<P>,
{
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}, {}", self.inner, self.token)
}
}
macro_rules! token_predicate {
($token:expr) => ($token);
($token1:expr, $token2:expr $(, $tokens:expr)*) => (
TokenPredicate::new(token_predicate!($token2 $(, $tokens)*), $token1)
);
}
// TODO: Document this!
pub fn recover<P: Parser>(p: &mut P, term: &[Token], eat_term: bool) {
let mut stack = Vec::new();
loop {
let Spanned {
value: tkn,
span: sp,
} = p.peek(0);
if stack.is_empty() {
for t in term {
if *t == tkn {
if eat_term {
p.skip();
}
return;
}
}
}
// p.emit(
// DiagBuilder2::note("Skipped during recovery")
// .span(sp)
// );
match tkn {
OpenDelim(x) => stack.push(x),
CloseDelim(x) => {
if let Some(open) = stack.pop() {
if open!= x {
p.emit(
DiagBuilder2::fatal(format!(
"Found closing {} which is not the complement to the previous \
opening {}",
CloseDelim(x),
OpenDelim(open)
))
.span(sp),
);
break;
}
} else {
p.emit(
DiagBuilder2::fatal(format!(
"Found closing {} without an earlier opening {}",
CloseDelim(x),
OpenDelim(x)
))
.span(sp),
);
break;
}
}
Eof => break,
_ => (),
}
p.skip();
}
}
/// Apply a parser and if it fails, recover to one of a list of tokens. This
/// turns reported into recovered errors.
pub fn recovered<P: Parser, R, F>(
p: &mut P,
term: &[Token],
eat_term: bool,
mut parse: F,
) -> RecoveredResult<R>
where
F: FnMut(&mut P) -> Result<R, Reported>,
{
match parse(p) {
Ok(x) => Ok(x),
Err(Reported) => {
recover(p, term, eat_term);
Err(Recovered)
}
}
}
/// Consume a token if it is present, do nothing otherwise.
pub fn accept<P: Parser>(p: &mut P, expect: Token) -> bool {
if p.peek(0).value == expect {
p.bump();
true
} else {
false
}
}
/// Consume a specific token, or emit an error if the next token in the stream
/// does not match the expected one.
pub fn require<P: Parser>(p: &mut P, expect: Token) -> ReportedResult<()> {
let Spanned {
value: actual,
span,
} = p.peek(0);
if actual == expect {
p.bump();
Ok(())
} else {
p.emit(
DiagBuilder2::error(format!("Expected {}, but found {} instead", expect, actual))
.span(span),
);
Err(Reported)
}
}
/// Repeatedly apply a parser until it returns `None`.
pub fn repeat<P: Parser, R, F, E>(p: &mut P, mut parse: F) -> Result<Vec<R>, E>
where
F: FnMut(&mut P) -> Result<Option<R>, E>,
{
let mut v = Vec::new();
while p.peek(0).value!= Eof {
match parse(p)? {
Some(x) => v.push(x),
None => break,
}
}
Ok(v)
}
/// Repeatedly apply a parser until a certain predicate matches.
pub fn repeat_until<P: Parser, R, F, T>(
p: &mut P,
mut term: T,
mut parse: F,
) -> Result<Vec<R>, Recovered>
where
F: FnMut(&mut P) -> Result<R, Reported>,
T: Predicate<P>,
{
let mut v = Vec::new();
while p.peek(0).value!= Eof &&!term.matches(p) {
|
Ok(x) => v.push(x),
Err(_) => {
term.recover(p, false);
return Err(Recovered);
}
}
}
Ok(v)
}
/// Parse a list of items separated with a specific token, until a terminator
/// oktne has been reached. The terminator is not consumed.
pub fn separated<P: Parser, M, R, F, T>(
p: &mut P,
sep: Token,
mut term: T,
msg: &M,
mut parse: F,
) -> RecoveredResult<Vec<R>>
where
F: FnMut(&mut P) -> ReportedResult<R>,
T: Predicate<P>,
M: Display +?Sized,
{
let mut v = Vec::new();
while!p.is_fatal() && p.peek(0).value!= Eof &&!term.matches(p) {
// Parse the item. If the parser fails, recover to the terminator and
// abort.
match parse(p) {
Ok(x) => v.push(x),
Err(_) => {
term.recover(p, false);
return Err(Recovered);
}
}
// Try to match the terminator. If it does not, consume a separator and
// catch the case where the separator is immediately followed by the
// terminator (superfluous separator).
if term.matches(p) {
break;
} else if accept(p, sep) {
if term.matches(p) {
let q = p.last_span();
p.emit(DiagBuilder2::warning(format!("Superfluous trailing {}", sep)).span(q));
break;
}
} else {
let Spanned { value: tkn, span } = p.peek(0);
p.emit(
DiagBuilder2::error(format!(
"Expected {} or {} after {}, but found {} instead",
term, sep, msg, tkn
))
.span(span),
);
term.recover(p, false);
return Err(Recovered);
}
}
Ok(v)
}
/// Parse a non-empty list. See `separated` for details.
pub fn separated_nonempty<P: Parser, M, R, F, T>(
p: &mut P,
sep: Token,
term: T,
msg: &M,
parse: F,
) -> RecoveredResult<Vec<R>>
where
F: FnMut(&mut P) -> ReportedResult<R>,
T: Predicate<P>,
M: Display +?Sized,
{
let q = p.peek(0).span;
let v = separated(p, sep, term, msg, parse)?;
if v.is_empty() {
p.emit(DiagBuilder2::error(format!("Expected at least one {}", msg)).span(q));
Err(Recovered)
} else {
Ok(v)
}
}
/// Parses the opening delimiter, calls the `inner` function, and parses the
/// closing delimiter. Properly recovers to and including the closing
/// delimiter if the `inner` function throws an error.
pub fn flanked<P: Parser, R, F>(p: &mut P, delim: DelimToken, mut inner: F) -> RecoveredResult<R>
where
F: FnMut(&mut P) -> ReportedResult<R>,
{
require(p, OpenDelim(delim)).map_err(|_| Recovered)?;
match inner(p) {
Ok(r) => match require(p, CloseDelim(delim)) {
Ok(_) => Ok(r),
Err(Reported) => {
recover(p, &[CloseDelim(delim)], true);
Err(Recovered)
}
},
Err(Reported) => {
recover(p, &[CloseDelim(delim)], true);
Err(Recovered)
}
}
}
/// If the opening delimiter is present, consumes it, calls the `inner`
/// function, and parses the closing delimiter. Properly recovers to and
/// including the closing delimiter if the `inner` function throws an error.
/// If the opening delimiter is not present, returns `None`.
pub fn try_flanked<P: Parser, R, F>(
p: &mut P,
delim: DelimToken,
inner: F,
) -> RecoveredResult<Option<R>>
where
F: FnMut(&mut P) -> ReportedResult<R>,
{
if p.peek(0).value == OpenDelim(delim) {
flanked(p, delim, inner).map(|r| Some(r))
} else {
Ok(None)
}
}
/// Parse an identifier.
pub fn parse_ident<P: Parser, M: Display>(p: &mut P, msg: M) -> ReportedResult<Spanned<Name>> {
let Spanned { value, span } = p.peek(0);
match value {
Ident(n) => {
p.bump();
Ok(Spanned::new(n, span))
}
wrong => {
p.emit(
DiagBuilder2::error(format!("Expected {}, but found {} instead", msg, wrong))
.span(span),
);
Err(Reported)
}
}
}
/// Try to parse an identifier. Consumes the token and returns the spanned
/// name when the next token is an identifier; otherwise leaves the stream
/// untouched and returns `None`.
pub fn try_ident<P: Parser>(p: &mut P) -> Option<Spanned<Name>> {
    let Spanned { value, span } = p.peek(0);
    if let Ident(name) = value {
        p.bump();
        Some(Spanned::new(name, span))
    } else {
        None
    }
}
/// Determine the earliest occurring token from a set. Useful to determine which
/// of two or more tokens appears first in a stream.
pub fn earliest<P: Parser>(p: &mut P, tokens: &[Token]) -> Spanned<Token> {
for i in 0.. {
let pk = p.peek(i);
if pk.value == Eof {
return pk;
}
for t in tokens {
if *t == pk.value {
return pk;
}
}
}
unreachable!();
}
|
match parse(p) {
|
random_line_split
|
core.rs
|
// Copyright (c) 2016-2021 Fabian Schuiki
use crate::lexer::token::*;
use crate::parser::rules::{Parser, Recovered, RecoveredResult, Reported, ReportedResult};
use moore_common::errors::*;
use moore_common::name::*;
use moore_common::source::*;
use std;
use std::fmt::Display;
use std::marker::PhantomData;
/// A predicate that matches on the current position of a token stream.
pub trait Predicate<P: Parser>: Display {
/// Match the predicate against the current position of the parser.
fn matches(&mut self, _: &mut P) -> bool;
/// Skip tokens in the parser until the predicate matches. Optionally also
/// consume the tokens that make up the predicate.
fn recover(&mut self, _: &mut P, consume: bool);
}
impl<P> Predicate<P> for Token
where
P: Parser,
{
fn matches(&mut self, p: &mut P) -> bool {
p.peek(0).value == *self
}
fn recover(&mut self, p: &mut P, consume: bool) {
recover(p, &[*self], consume)
}
}
/// A function predicate.
struct FuncPredicate<P: Parser, M: FnMut(&mut P) -> bool, R: FnMut(&mut P, bool)> {
match_func: M,
recover_func: R,
desc: &'static str,
_marker: PhantomData<P>,
}
impl<P, M, R> Predicate<P> for FuncPredicate<P, M, R>
where
P: Parser,
M: FnMut(&mut P) -> bool,
R: FnMut(&mut P, bool),
{
fn matches(&mut self, p: &mut P) -> bool {
(self.match_func)(p)
}
fn recover(&mut self, p: &mut P, consume: bool) {
(self.recover_func)(p, consume)
}
}
impl<P, M, R> Display for FuncPredicate<P, M, R>
where
P: Parser,
M: FnMut(&mut P) -> bool,
R: FnMut(&mut P, bool),
{
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}", self.desc)
}
}
pub struct TokenPredicate<P: Parser, T: Predicate<P>> {
inner: T,
token: Token,
_marker: PhantomData<P>,
}
impl<P, T> TokenPredicate<P, T>
where
P: Parser,
T: Predicate<P>,
{
/// Create a new token predicate.
pub fn new(inner: T, token: Token) -> TokenPredicate<P, T>
|
}
impl<P, T> Predicate<P> for TokenPredicate<P, T>
where
P: Parser,
T: Predicate<P>,
{
fn matches(&mut self, p: &mut P) -> bool {
self.inner.matches(p) || p.peek(0).value == self.token
}
fn recover(&mut self, p: &mut P, consume: bool) {
self.inner.recover(p, consume)
}
}
impl<P, T> Display for TokenPredicate<P, T>
where
P: Parser,
T: Predicate<P>,
{
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}, {}", self.inner, self.token)
}
}
macro_rules! token_predicate {
($token:expr) => ($token);
($token1:expr, $token2:expr $(, $tokens:expr)*) => (
TokenPredicate::new(token_predicate!($token2 $(, $tokens)*), $token1)
);
}
/// Skip tokens until one of the tokens in `term` appears at delimiter
/// nesting depth zero. Open/close delimiters are tracked on a stack so that
/// terminators inside nested parentheses/brackets/braces are ignored. If
/// `eat_term` is set, the terminator itself is consumed as well.
///
/// Stops early (after emitting a fatal diagnostic) on a mismatched or
/// unopened closing delimiter, and always stops at end of file.
pub fn recover<P: Parser>(p: &mut P, term: &[Token], eat_term: bool) {
    // Currently open delimiters; terminators only count when this is empty.
    let mut stack = Vec::new();
    loop {
        let Spanned {
            value: tkn,
            span: sp,
        } = p.peek(0);
        if stack.is_empty() {
            // At depth zero: check whether we reached a terminator.
            for t in term {
                if *t == tkn {
                    if eat_term {
                        p.skip();
                    }
                    return;
                }
            }
        }
        // p.emit(
        //     DiagBuilder2::note("Skipped during recovery")
        //     .span(sp)
        // );
        match tkn {
            OpenDelim(x) => stack.push(x),
            CloseDelim(x) => {
                if let Some(open) = stack.pop() {
                    if open != x {
                        p.emit(
                            DiagBuilder2::fatal(format!(
                                "Found closing {} which is not the complement to the previous \
                                 opening {}",
                                CloseDelim(x),
                                OpenDelim(open)
                            ))
                            .span(sp),
                        );
                        break;
                    }
                } else {
                    p.emit(
                        DiagBuilder2::fatal(format!(
                            "Found closing {} without an earlier opening {}",
                            CloseDelim(x),
                            OpenDelim(x)
                        ))
                        .span(sp),
                    );
                    break;
                }
            }
            Eof => break,
            _ => (),
        }
        p.skip();
    }
}
/// Apply a parser; on failure, resynchronize the stream to one of the
/// `term` tokens. This turns a reported error into a recovered one.
pub fn recovered<P: Parser, R, F>(
    p: &mut P,
    term: &[Token],
    eat_term: bool,
    mut parse: F,
) -> RecoveredResult<R>
where
    F: FnMut(&mut P) -> Result<R, Reported>,
{
    parse(p).map_err(|_| {
        // The inner parser already emitted its diagnostic; skip ahead to a
        // known-good token before handing control back to the caller.
        recover(p, term, eat_term);
        Recovered
    })
}
/// Consume a token if it is present, do nothing otherwise.
///
/// Returns `true` exactly when the token was consumed.
pub fn accept<P: Parser>(p: &mut P, expect: Token) -> bool {
    let found = p.peek(0).value == expect;
    if found {
        p.bump();
    }
    found
}
/// Consume a specific token, or emit an error diagnostic if the next token
/// in the stream does not match the expected one.
pub fn require<P: Parser>(p: &mut P, expect: Token) -> ReportedResult<()> {
    let Spanned {
        value: actual,
        span,
    } = p.peek(0);
    // Guard clause: report a mismatch without consuming anything.
    if actual != expect {
        p.emit(
            DiagBuilder2::error(format!("Expected {}, but found {} instead", expect, actual))
                .span(span),
        );
        return Err(Reported);
    }
    p.bump();
    Ok(())
}
/// Repeatedly apply a parser until it returns `None` or the end of the
/// file is reached; errors propagate immediately via `?`.
pub fn repeat<P: Parser, R, F, E>(p: &mut P, mut parse: F) -> Result<Vec<R>, E>
where
    F: FnMut(&mut P) -> Result<Option<R>, E>,
{
    let mut items = Vec::new();
    loop {
        if p.peek(0).value == Eof {
            break;
        }
        if let Some(item) = parse(p)? {
            items.push(item);
        } else {
            // The parser declined to produce another item: stop.
            break;
        }
    }
    Ok(items)
}
/// Repeatedly apply a parser until a certain predicate matches.
///
/// Stops at end of file or when `term` matches (the terminator is not
/// consumed). If an item fails to parse, recovery skips to the terminator
/// and the error is returned as `Recovered`.
pub fn repeat_until<P: Parser, R, F, T>(
    p: &mut P,
    mut term: T,
    mut parse: F,
) -> Result<Vec<R>, Recovered>
where
    F: FnMut(&mut P) -> Result<R, Reported>,
    T: Predicate<P>,
{
    let mut v = Vec::new();
    while p.peek(0).value != Eof && !term.matches(p) {
        match parse(p) {
            Ok(x) => v.push(x),
            Err(_) => {
                // Item parser already reported; resync to the terminator
                // without consuming it and abort.
                term.recover(p, false);
                return Err(Recovered);
            }
        }
    }
    Ok(v)
}
/// Parse a list of items separated with a specific token, until a terminator
/// token has been reached. The terminator is not consumed.
///
/// `sep` is the separator token, `term` the terminating predicate, and
/// `msg` a description of the expected item used in diagnostics. A
/// superfluous trailing separator only produces a warning.
pub fn separated<P: Parser, M, R, F, T>(
    p: &mut P,
    sep: Token,
    mut term: T,
    msg: &M,
    mut parse: F,
) -> RecoveredResult<Vec<R>>
where
    F: FnMut(&mut P) -> ReportedResult<R>,
    T: Predicate<P>,
    M: Display + ?Sized,
{
    let mut v = Vec::new();
    while !p.is_fatal() && p.peek(0).value != Eof && !term.matches(p) {
        // Parse the item. If the parser fails, recover to the terminator and
        // abort.
        match parse(p) {
            Ok(x) => v.push(x),
            Err(_) => {
                term.recover(p, false);
                return Err(Recovered);
            }
        }
        // Try to match the terminator. If it does not, consume a separator and
        // catch the case where the separator is immediately followed by the
        // terminator (superfluous separator).
        if term.matches(p) {
            break;
        } else if accept(p, sep) {
            if term.matches(p) {
                let q = p.last_span();
                p.emit(DiagBuilder2::warning(format!("Superfluous trailing {}", sep)).span(q));
                break;
            }
        } else {
            // Neither terminator nor separator: report and resynchronize.
            let Spanned { value: tkn, span } = p.peek(0);
            p.emit(
                DiagBuilder2::error(format!(
                    "Expected {} or {} after {}, but found {} instead",
                    term, sep, msg, tkn
                ))
                .span(span),
            );
            term.recover(p, false);
            return Err(Recovered);
        }
    }
    Ok(v)
}
/// Parse a non-empty list. See `separated` for details.
///
/// In addition to `separated`'s behavior, emits an error (anchored at the
/// position where the list started) and returns `Recovered` if the parsed
/// list turned out to be empty.
pub fn separated_nonempty<P: Parser, M, R, F, T>(
    p: &mut P,
    sep: Token,
    term: T,
    msg: &M,
    parse: F,
) -> RecoveredResult<Vec<R>>
where
    F: FnMut(&mut P) -> ReportedResult<R>,
    T: Predicate<P>,
    M: Display + ?Sized,
{
    // Remember where the list started so the "at least one" error points there.
    let q = p.peek(0).span;
    let v = separated(p, sep, term, msg, parse)?;
    if v.is_empty() {
        p.emit(DiagBuilder2::error(format!("Expected at least one {}", msg)).span(q));
        Err(Recovered)
    } else {
        Ok(v)
    }
}
/// Parses the opening delimiter, calls the `inner` function, and parses the
/// closing delimiter. Properly recovers to and including the closing
/// delimiter if the `inner` function throws an error.
pub fn flanked<P: Parser, R, F>(p: &mut P, delim: DelimToken, mut inner: F) -> RecoveredResult<R>
where
    F: FnMut(&mut P) -> ReportedResult<R>,
{
    // A missing opening delimiter is reported but triggers no recovery.
    if require(p, OpenDelim(delim)).is_err() {
        return Err(Recovered);
    }
    let outcome = inner(p);
    if let Ok(r) = outcome {
        if require(p, CloseDelim(delim)).is_ok() {
            return Ok(r);
        }
    }
    // Either the inner parser or the closing delimiter failed: skip up to
    // and including the closing delimiter, then report as recovered.
    recover(p, &[CloseDelim(delim)], true);
    Err(Recovered)
}
/// If the opening delimiter is present, consumes it, calls the `inner`
/// function, and parses the closing delimiter. Properly recovers to and
/// including the closing delimiter if the `inner` function throws an error.
/// If the opening delimiter is not present, returns `None`.
pub fn try_flanked<P: Parser, R, F>(
    p: &mut P,
    delim: DelimToken,
    inner: F,
) -> RecoveredResult<Option<R>>
where
    F: FnMut(&mut P) -> ReportedResult<R>,
{
    // No opening delimiter at the cursor: nothing to do.
    if p.peek(0).value != OpenDelim(delim) {
        return Ok(None);
    }
    flanked(p, delim, inner).map(Some)
}
/// Parse an identifier.
///
/// Consumes the identifier and returns its name and span. On any other
/// token, emits an error (using `msg` to describe what was expected) and
/// returns `Err(Reported)` without consuming anything.
pub fn parse_ident<P: Parser, M: Display>(p: &mut P, msg: M) -> ReportedResult<Spanned<Name>> {
    let Spanned { value, span } = p.peek(0);
    match value {
        Ident(n) => {
            p.bump();
            Ok(Spanned::new(n, span))
        }
        wrong => {
            p.emit(
                DiagBuilder2::error(format!("Expected {}, but found {} instead", msg, wrong))
                    .span(span),
            );
            Err(Reported)
        }
    }
}
/// Try to parse an identifier.
///
/// Consumes and returns it if the current token is an identifier;
/// otherwise leaves the stream untouched and returns `None`.
pub fn try_ident<P: Parser>(p: &mut P) -> Option<Spanned<Name>> {
    let Spanned { value, span } = p.peek(0);
    if let Ident(n) = value {
        p.bump();
        Some(Spanned::new(n, span))
    } else {
        None
    }
}
/// Determine the earliest occurring token from a set. Useful to determine which
/// of two or more tokens appears first in a stream.
///
/// Scans ahead without consuming anything; `Eof` bounds the lookahead so
/// the loop always terminates (returning the `Eof` token itself if no
/// candidate appears first).
pub fn earliest<P: Parser>(p: &mut P, tokens: &[Token]) -> Spanned<Token> {
    for i in 0.. {
        let pk = p.peek(i);
        if pk.value == Eof {
            return pk;
        }
        for t in tokens {
            if *t == pk.value {
                return pk;
            }
        }
    }
    // The loop always returns at Eof at the latest.
    unreachable!();
}
|
{
TokenPredicate {
inner: inner,
token: token,
_marker: PhantomData,
}
}
|
identifier_body
|
core.rs
|
// Copyright (c) 2016-2021 Fabian Schuiki
use crate::lexer::token::*;
use crate::parser::rules::{Parser, Recovered, RecoveredResult, Reported, ReportedResult};
use moore_common::errors::*;
use moore_common::name::*;
use moore_common::source::*;
use std;
use std::fmt::Display;
use std::marker::PhantomData;
/// A predicate that matches on the current position of a token stream.
pub trait Predicate<P: Parser>: Display {
/// Match the predicate against the current position of the parser.
fn matches(&mut self, _: &mut P) -> bool;
/// Skip tokens in the parser until the predicate matches. Optionally also
/// consume the tokens that make up the predicate.
fn recover(&mut self, _: &mut P, consume: bool);
}
impl<P> Predicate<P> for Token
where
P: Parser,
{
fn matches(&mut self, p: &mut P) -> bool {
p.peek(0).value == *self
}
fn recover(&mut self, p: &mut P, consume: bool) {
recover(p, &[*self], consume)
}
}
/// A function predicate.
struct FuncPredicate<P: Parser, M: FnMut(&mut P) -> bool, R: FnMut(&mut P, bool)> {
match_func: M,
recover_func: R,
desc: &'static str,
_marker: PhantomData<P>,
}
impl<P, M, R> Predicate<P> for FuncPredicate<P, M, R>
where
P: Parser,
M: FnMut(&mut P) -> bool,
R: FnMut(&mut P, bool),
{
fn matches(&mut self, p: &mut P) -> bool {
(self.match_func)(p)
}
fn recover(&mut self, p: &mut P, consume: bool) {
(self.recover_func)(p, consume)
}
}
impl<P, M, R> Display for FuncPredicate<P, M, R>
where
P: Parser,
M: FnMut(&mut P) -> bool,
R: FnMut(&mut P, bool),
{
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}", self.desc)
}
}
pub struct TokenPredicate<P: Parser, T: Predicate<P>> {
inner: T,
token: Token,
_marker: PhantomData<P>,
}
impl<P, T> TokenPredicate<P, T>
where
    P: Parser,
    T: Predicate<P>,
{
    /// Create a new token predicate.
    ///
    /// The result matches whenever `inner` matches, or when the current
    /// token equals `token`.
    pub fn new(inner: T, token: Token) -> TokenPredicate<P, T> {
        TokenPredicate {
            inner: inner,
            token: token,
            _marker: PhantomData,
        }
    }
}
impl<P, T> Predicate<P> for TokenPredicate<P, T>
where
P: Parser,
T: Predicate<P>,
{
fn matches(&mut self, p: &mut P) -> bool {
self.inner.matches(p) || p.peek(0).value == self.token
}
fn recover(&mut self, p: &mut P, consume: bool) {
self.inner.recover(p, consume)
}
}
impl<P, T> Display for TokenPredicate<P, T>
where
P: Parser,
T: Predicate<P>,
{
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}, {}", self.inner, self.token)
}
}
macro_rules! token_predicate {
($token:expr) => ($token);
($token1:expr, $token2:expr $(, $tokens:expr)*) => (
TokenPredicate::new(token_predicate!($token2 $(, $tokens)*), $token1)
);
}
// TODO: Document this!
pub fn recover<P: Parser>(p: &mut P, term: &[Token], eat_term: bool) {
let mut stack = Vec::new();
loop {
let Spanned {
value: tkn,
span: sp,
} = p.peek(0);
if stack.is_empty() {
for t in term {
if *t == tkn {
if eat_term {
p.skip();
}
return;
}
}
}
// p.emit(
// DiagBuilder2::note("Skipped during recovery")
// .span(sp)
// );
match tkn {
OpenDelim(x) => stack.push(x),
CloseDelim(x) => {
if let Some(open) = stack.pop() {
if open!= x {
p.emit(
DiagBuilder2::fatal(format!(
"Found closing {} which is not the complement to the previous \
opening {}",
CloseDelim(x),
OpenDelim(open)
))
.span(sp),
);
break;
}
} else {
p.emit(
DiagBuilder2::fatal(format!(
"Found closing {} without an earlier opening {}",
CloseDelim(x),
OpenDelim(x)
))
.span(sp),
);
break;
}
}
Eof => break,
_ => (),
}
p.skip();
}
}
/// Apply a parser and if it fails, recover to one of a list of tokens. This
/// turns reported into recovered errors.
pub fn recovered<P: Parser, R, F>(
p: &mut P,
term: &[Token],
eat_term: bool,
mut parse: F,
) -> RecoveredResult<R>
where
F: FnMut(&mut P) -> Result<R, Reported>,
{
match parse(p) {
Ok(x) => Ok(x),
Err(Reported) => {
recover(p, term, eat_term);
Err(Recovered)
}
}
}
/// Consume a token if it is present, do nothing otherwise.
pub fn accept<P: Parser>(p: &mut P, expect: Token) -> bool {
if p.peek(0).value == expect {
p.bump();
true
} else
|
}
/// Consume a specific token, or emit an error if the next token in the stream
/// does not match the expected one.
pub fn require<P: Parser>(p: &mut P, expect: Token) -> ReportedResult<()> {
let Spanned {
value: actual,
span,
} = p.peek(0);
if actual == expect {
p.bump();
Ok(())
} else {
p.emit(
DiagBuilder2::error(format!("Expected {}, but found {} instead", expect, actual))
.span(span),
);
Err(Reported)
}
}
/// Repeatedly apply a parser until it returns `None`.
pub fn repeat<P: Parser, R, F, E>(p: &mut P, mut parse: F) -> Result<Vec<R>, E>
where
F: FnMut(&mut P) -> Result<Option<R>, E>,
{
let mut v = Vec::new();
while p.peek(0).value!= Eof {
match parse(p)? {
Some(x) => v.push(x),
None => break,
}
}
Ok(v)
}
/// Repeatedly apply a parser until a certain predicate matches.
pub fn repeat_until<P: Parser, R, F, T>(
p: &mut P,
mut term: T,
mut parse: F,
) -> Result<Vec<R>, Recovered>
where
F: FnMut(&mut P) -> Result<R, Reported>,
T: Predicate<P>,
{
let mut v = Vec::new();
while p.peek(0).value!= Eof &&!term.matches(p) {
match parse(p) {
Ok(x) => v.push(x),
Err(_) => {
term.recover(p, false);
return Err(Recovered);
}
}
}
Ok(v)
}
/// Parse a list of items separated with a specific token, until a terminator
/// oktne has been reached. The terminator is not consumed.
pub fn separated<P: Parser, M, R, F, T>(
p: &mut P,
sep: Token,
mut term: T,
msg: &M,
mut parse: F,
) -> RecoveredResult<Vec<R>>
where
F: FnMut(&mut P) -> ReportedResult<R>,
T: Predicate<P>,
M: Display +?Sized,
{
let mut v = Vec::new();
while!p.is_fatal() && p.peek(0).value!= Eof &&!term.matches(p) {
// Parse the item. If the parser fails, recover to the terminator and
// abort.
match parse(p) {
Ok(x) => v.push(x),
Err(_) => {
term.recover(p, false);
return Err(Recovered);
}
}
// Try to match the terminator. If it does not, consume a separator and
// catch the case where the separator is immediately followed by the
// terminator (superfluous separator).
if term.matches(p) {
break;
} else if accept(p, sep) {
if term.matches(p) {
let q = p.last_span();
p.emit(DiagBuilder2::warning(format!("Superfluous trailing {}", sep)).span(q));
break;
}
} else {
let Spanned { value: tkn, span } = p.peek(0);
p.emit(
DiagBuilder2::error(format!(
"Expected {} or {} after {}, but found {} instead",
term, sep, msg, tkn
))
.span(span),
);
term.recover(p, false);
return Err(Recovered);
}
}
Ok(v)
}
/// Parse a non-empty list. See `separated` for details.
pub fn separated_nonempty<P: Parser, M, R, F, T>(
p: &mut P,
sep: Token,
term: T,
msg: &M,
parse: F,
) -> RecoveredResult<Vec<R>>
where
F: FnMut(&mut P) -> ReportedResult<R>,
T: Predicate<P>,
M: Display +?Sized,
{
let q = p.peek(0).span;
let v = separated(p, sep, term, msg, parse)?;
if v.is_empty() {
p.emit(DiagBuilder2::error(format!("Expected at least one {}", msg)).span(q));
Err(Recovered)
} else {
Ok(v)
}
}
/// Parses the opening delimiter, calls the `inner` function, and parses the
/// closing delimiter. Properly recovers to and including the closing
/// delimiter if the `inner` function throws an error.
pub fn flanked<P: Parser, R, F>(p: &mut P, delim: DelimToken, mut inner: F) -> RecoveredResult<R>
where
F: FnMut(&mut P) -> ReportedResult<R>,
{
require(p, OpenDelim(delim)).map_err(|_| Recovered)?;
match inner(p) {
Ok(r) => match require(p, CloseDelim(delim)) {
Ok(_) => Ok(r),
Err(Reported) => {
recover(p, &[CloseDelim(delim)], true);
Err(Recovered)
}
},
Err(Reported) => {
recover(p, &[CloseDelim(delim)], true);
Err(Recovered)
}
}
}
/// If the opening delimiter is present, consumes it, calls the `inner`
/// function, and parses the closing delimiter. Properly recovers to and
/// including the closing delimiter if the `inner` function throws an error.
/// If the opening delimiter is not present, returns `None`.
pub fn try_flanked<P: Parser, R, F>(
p: &mut P,
delim: DelimToken,
inner: F,
) -> RecoveredResult<Option<R>>
where
F: FnMut(&mut P) -> ReportedResult<R>,
{
if p.peek(0).value == OpenDelim(delim) {
flanked(p, delim, inner).map(|r| Some(r))
} else {
Ok(None)
}
}
/// Parse an identifier.
pub fn parse_ident<P: Parser, M: Display>(p: &mut P, msg: M) -> ReportedResult<Spanned<Name>> {
let Spanned { value, span } = p.peek(0);
match value {
Ident(n) => {
p.bump();
Ok(Spanned::new(n, span))
}
wrong => {
p.emit(
DiagBuilder2::error(format!("Expected {}, but found {} instead", msg, wrong))
.span(span),
);
Err(Reported)
}
}
}
/// Try to parse an identifier.
pub fn try_ident<P: Parser>(p: &mut P) -> Option<Spanned<Name>> {
let Spanned { value, span } = p.peek(0);
match value {
Ident(n) => {
p.bump();
Some(Spanned::new(n, span))
}
_ => None,
}
}
/// Determine the earliest occurring token from a set. Useful to determine which
/// of two or more tokens appears first in a stream.
pub fn earliest<P: Parser>(p: &mut P, tokens: &[Token]) -> Spanned<Token> {
for i in 0.. {
let pk = p.peek(i);
if pk.value == Eof {
return pk;
}
for t in tokens {
if *t == pk.value {
return pk;
}
}
}
unreachable!();
}
|
{
false
}
|
conditional_block
|
ship_parser.rs
|
use log::{error, warn, debug, trace};
use serde_json::Value;
use serde_json::map::Map;
use crate::ballistics::{Ballistics, Dispersion};
use crate::gun::*;
use crate::download::{download, download_with_params};
use serde_derive::Deserialize;
use std::collections::HashMap;
use cgmath::{Matrix4, Point3};
use std::io::prelude::*;
use std::convert::TryInto;
/// Build a `Ballistics` from the numeric fields of a raw ammo definition.
fn parse_ballistics(ammo: &Map<String, Value>) -> Ballistics {
    // Pull a required numeric field out of the ammo JSON (panics if absent,
    // matching the surrounding parser's fail-fast style).
    let field = |key: &str| ammo[key].as_f64().unwrap();
    Ballistics::new(
        field("bulletMass"),
        field("bulletDiametr"),
        field("bulletSpeed"),
        field("bulletAirDrag"),
        field("bulletKrupp"),
    )
}
fn parse_ammotype(ammo: &Map<String, Value>) -> Ammo {
let ammotype = ammo["ammoType"].as_str().expect("Couldn't find ammoType");
debug!("Found ammo of type {}", ammotype);
let ballistics = parse_ballistics(ammo);
if ammotype == "HE" {
Ammo::new(
AmmoType::He(HeAmmo::new(
ammo["alphaDamage"].as_f64().expect("Couldn't find alphaDamage"),
ammo["alphaPiercingHE"].as_f64().expect("Couldn't find alphaPiercingHE"),
)),
ballistics,
)
} else if ammotype == "AP" {
Ammo::new(
AmmoType::Ap(ApAmmo::new(
ammo["bulletDiametr"].as_f64().expect("Couldn't find bulletDiametr"),
ammo["alphaDamage"].as_f64().expect("Couldn't find alphaDamage"),
ammo["bulletDetonator"].as_f64().expect("Couldn't find bulletDetonator"),
ammo["bulletDetonatorThreshold"].as_f64().expect("Couldn't find bulletDetonatorThreshold"),
)),
ballistics,
)
} else if ammotype == "CS" {
warn!("Found unimplemented ammo type CS!");
Ammo::new(
AmmoType::He(HeAmmo::new(1.0, 1.0)), ballistics)
} else {
error!("Found unknown ammo type {}!", ammotype);
panic!()
}
}
/// Parse every gun of an artillery component.
///
/// All guns share one `Dispersion` built from the component-level fields;
/// each gun contributes its own ammo list.
fn parse_artillery(artillery_spec: &Map<String, Value>) -> Vec<Gun> {
    //debug!("{:#?}", artillery_spec);
    let guns = artillery_spec["guns"].as_object().unwrap();
    /*for (key,gun) in guns {
        debug!("{}: {:?}", key, gun);
    }*/
    // Dispersion parameters live on the artillery component, not per-gun.
    let dispersion = Dispersion::new(
        artillery_spec["minDistH"].as_f64().expect("Couldn't find horizontal"),
        artillery_spec["minDistV"].as_f64().expect("Couldn't find vertical"),
        artillery_spec["maxDist"].as_f64().expect("Couldn't find maxrange"),
        artillery_spec["sigmaCount"].as_f64().expect("Couldn't find sigmaCount")
    );
    guns.iter().map(|(_, gun)| {
        let ammo_list = gun["ammoList"].as_object().expect("Couldn't get ammoList");
        //debug!("{}: {:#?}", key, gun);
        let ammo: Vec<_> = ammo_list.iter().map(|(_, ammo)| {
            parse_ammotype(ammo.as_object().unwrap())
        }).collect();
        Gun::new(
            dispersion.clone(),
            ammo,
        )
    }).collect()
}
/// One armor material entry from the model's `materials` table.
#[derive(Deserialize)]
struct ArmorMaterial {
    // Numeric armor-class id; the JSON key is "type" (a Rust keyword).
    #[serde(alias = "type")]
    armor_type: usize,
    thickness: usize, // plate thickness in mm
}
/// A group of triangles sharing one material.
#[derive(Deserialize)]
struct ArmorGroup {
    // Key into `RawGeometry::materials`.
    material: String,
    // Flat triangle list: every three entries index one face's vertices.
    indices: Vec<usize>,
}
/// The armor mesh: a flat vertex array plus per-material index groups.
#[derive(Deserialize)]
struct ArmorObject {
    // Flat [x, y, z, x, y, z, ...] coordinate array.
    vertices: Vec<f64>,
    groups: Vec<ArmorGroup>,
}
/// Wrapper matching the `objects` level of the geometry JSON.
#[derive(Deserialize)]
struct GeometryObject {
    armor: ArmorObject,
}
/// Top-level armor model file as downloaded: geometry plus material table.
#[derive(Deserialize)]
struct RawGeometry {
    objects: GeometryObject,
    materials: HashMap<String, ArmorMaterial>,
}
impl RawGeometry {
    /// Convert the raw geometry into world-space `ArmorFace`s.
    ///
    /// `matrix` is the model-to-world transform; every vertex triple is
    /// pushed through it, then faces are assembled from the index triples
    /// of each material group, carrying the group's thickness and type.
    pub fn to_armor_faces(self, matrix: Matrix4<f64>) -> Vec<ArmorFace> {
        let mut vertices = vec!();
        // Vertices are stored as a flat [x, y, z, x, y, z, ...] array.
        for i in 0..self.objects.armor.vertices.len()/3 {
            let pnt = Point3::new(
                self.objects.armor.vertices[i*3+0],
                self.objects.armor.vertices[i*3+1],
                self.objects.armor.vertices[i*3+2],
            );
            // Apply the transform in homogeneous coordinates.
            let pnt = Point3::from_homogeneous(matrix * pnt.to_homogeneous());
            vertices.push(Point3::new(pnt.x, pnt.y, pnt.z));
        }
        let mut faces = vec!();
        for group in self.objects.armor.groups {
            let material = &self.materials[&group.material];
            // Every three indices form one triangle.
            for i in 0..group.indices.len()/3 {
                faces.push(ArmorFace::new(
                    [
                        vertices[group.indices[i*3+0]],
                        vertices[group.indices[i*3+1]],
                        vertices[group.indices[i*3+2]],
                    ],
                    material.thickness as f64,
                    ArmorType::from_id(material.armor_type),
                ));
            }
        }
        faces
    }
}
/// Download and assemble the armor mesh for a hull.
///
/// Queries the armor viewer endpoint with the hull's component selection,
/// extracts the embedded `var scheme` JSON, downloads each referenced armor
/// model, and transforms its faces into world space. Models that come back
/// empty (occasional 404s) are skipped.
///
/// # Panics
/// Panics when the page does not contain exactly one `var scheme` line or
/// when the JSON payload is malformed.
fn parse_armor(url: &str, hull_components: &Map<String, Value>) -> Vec<ArmorFace> {
    let mut params = Map::new();
    for (k, v) in hull_components {
        debug!("Hull has component {}: {}", k, v);
        // Each component entry is an array; the viewer expects the first id.
        params.insert(k.to_string(), v[0].clone());
    }
    let page = download_with_params(&url, "armor", &Value::Object(params).to_string());
    let scheme: Vec<_> = page.lines().filter(|line| {
        line.contains("var scheme")
    }).collect();
    if scheme.len() != 1 {
        error!("Expected to find exactly one scheme variable! Found {}", scheme.len());
        panic!();
    }
    // Everything after the first '=' is the JSON literal (minus the wrapping
    // characters stripped by the 1..len-1 slice below).
    let armor = scheme[0].split("=").skip(1).collect::<Vec<_>>().join("=");
    let armor: Value = serde_json::from_str(&armor[1..armor.len()-1]).unwrap();
    let mut faces = vec!();
    for (_, v) in armor.as_object().unwrap() {
        let url = format!("https://gamemodels3d.com/games/worldofwarships/data/current/armor/{}", v["model"].as_str().unwrap());
        let model = download(&url);
        if model.is_empty() {
            // Sometimes we get 404 for some reason
            continue;
        }
        // Reassemble the 4x4 transform into a cgmath matrix.
        let mut m = [0.0; 16];
        let transform = v["transform"].as_array().unwrap();
        for i in 0..4 {
            let col = transform[i].as_array().unwrap();
            for j in 0..4 {
                // unwrap_or_else avoids building the panic message on the
                // success path (clippy: expect_fun_call).
                m[i*4 + j] = col[j].as_f64()
                    .unwrap_or_else(|| panic!("Couldn't get {}th element of column {}", j, i));
            }
        }
        let m = Matrix4::new(
            m[0*4 + 0], m[0*4 + 1], m[0*4 + 2], m[0*4 + 3],
            m[1*4 + 0], m[1*4 + 1], m[1*4 + 2], m[1*4 + 3],
            m[2*4 + 0], m[2*4 + 1], m[2*4 + 2], m[2*4 + 3],
            m[3*4 + 0], m[3*4 + 1], m[3*4 + 2], m[3*4 + 3],
        );
        //debug!("Got matrix: {:?}", m);
        let geometry: RawGeometry = serde_json::from_str(&model).unwrap();
        faces.append(&mut geometry.to_armor_faces(m));
    }
    debug!("Mesh has {} faces", faces.len());
    // Log the bounding box for sanity-checking (f64::INFINITY replaces the
    // old `1./0.` spelling; values are identical).
    let mins = [
        faces.iter().flat_map(|face| face.vertices.iter()).map(|p| p.x).fold(f64::INFINITY, f64::min),
        faces.iter().flat_map(|face| face.vertices.iter()).map(|p| p.y).fold(f64::INFINITY, f64::min),
        faces.iter().flat_map(|face| face.vertices.iter()).map(|p| p.z).fold(f64::INFINITY, f64::min),
    ];
    let maxs = [
        faces.iter().flat_map(|face| face.vertices.iter()).map(|p| p.x).fold(f64::NEG_INFINITY, f64::max),
        faces.iter().flat_map(|face| face.vertices.iter()).map(|p| p.y).fold(f64::NEG_INFINITY, f64::max),
        faces.iter().flat_map(|face| face.vertices.iter()).map(|p| p.z).fold(f64::NEG_INFINITY, f64::max),
    ];
    debug!("Bounding box: {:?} to {:?}", mins, maxs);
    // Dump the mesh as a .obj to debug.
    // NOTE(review): this writes `test.obj` into the working directory on
    // every call — consider gating it behind a debug flag.
    {
        let mut f = std::fs::File::create("test.obj").unwrap();
        for face in faces.iter() {
            for v in face.vertices.iter() {
                f.write_all(format!("v {} {} {}\n", v.x, v.y, v.z).as_bytes()).unwrap();
            }
        }
        for i in 0..faces.len() {
            f.write_all(format!("f {} {} {}\n", i*3+1, i*3+2, i*3+3).as_bytes()).unwrap();
        }
    }
    faces
}
fn
|
(faces: &Vec<ArmorFace>) -> [f64; 3] {
let mins = [
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.x}).fold(1./0., f64::min),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.y}).fold(1./0., f64::min),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.z}).fold(1./0., f64::min),
];
let maxs = [
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.x}).fold(-1./0., f64::max),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.y}).fold(-1./0., f64::max),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.z}).fold(-1./0., f64::max),
];
[
maxs[0] - mins[0],
maxs[1] - mins[1],
maxs[2] - mins[2],
]
}
/// Assemble a `ShipConfiguration` for one hull variant.
///
/// Looks up the hull component, converts its max speed to m/s, parses the
/// artillery component (if any) and the armor geometry, and derives the
/// ship length from the bounding box of the mesh.
fn parse_hull(url: &str, ship_spec: &Value, components: &Map<String, Value>) -> ShipConfiguration {
    let hull_spec = ship_spec["components"].as_object().unwrap();
    for (key, value) in hull_spec {
        debug!("Found component {}: {}", key, value);
    }
    // The "hull" entry is an array of component ids; only the first is used.
    let hull = &components[hull_spec["hull"].as_array().unwrap()[0].as_str().unwrap()];
    let max_speed = hull["maxSpeed"].as_f64().unwrap() / 1.944; // Scaling factor to get m/s, as far as I can tell
    let name = hull["name"].as_str().unwrap();
    let artillery = if hull_spec.contains_key("artillery") {
        let artillery = &hull_spec["artillery"];
        let artillery = artillery.as_array().unwrap();
        if artillery.len() != 1 {
            warn!("Found an artillery of length {}!", artillery.len());
        }
        // Only the first artillery component is parsed even if several exist.
        let artillery = &artillery[0];
        debug!("Parsing artillery: {:?}", artillery);
        parse_artillery(components[artillery.as_str().unwrap()].as_object().unwrap())
    } else {
        // No main battery on this hull.
        vec!()
    };
    let geometry = parse_armor(url, hull_spec);
    // size is [x, y, z] extents; index 2 is assumed to be the length axis —
    // NOTE(review): confirm against the model coordinate system.
    let size = find_size(&geometry);
    let length = size[2] * 1.53; // Scaling factor to get meters
    ShipConfiguration::new(
        artillery,
        geometry,
        max_speed,
        length,
        name.to_string(),
    )
}
/// Download and parse a full ship definition from gamemodels3d.
///
/// Returns `None` for ship classes that are deliberately ignored
/// (auxiliaries and submarines).
///
/// # Panics
/// Panics when the page layout is unexpected (no single `var _vehicle`
/// line), the embedded JSON is malformed, or the ship class is unknown.
pub fn download_vehicle(vehicle_id: &str) -> Option<Ship> {
    trace!("Downloading vehicle_id={}", vehicle_id);
    let url = format!("https://gamemodels3d.com/games/worldofwarships/vehicles/{}", vehicle_id);
    let page = download(&url);
    let vehicle: Vec<_> = page.lines().filter(|line| {
        line.contains("var _vehicle")
    }).collect();
    if vehicle.len() != 1 {
        panic!("Expected vehicle length to be 1!");
    }
    // Everything after the first '=' is the JSON literal; the 1..len-1
    // slice strips the surrounding characters.
    let spec = vehicle[0].split("=").skip(1).collect::<Vec<_>>().join("=");
    //println!("Spec: {}", spec);
    let v: Value = serde_json::from_str(&spec[1..spec.len()-1]).unwrap();
    let vehicle_components = v["Components"].as_object().unwrap();
    let hulls = v["ShipUpgradeInfo"]["_Hull"].as_object().unwrap();
    let mut configs = vec!();
    // One ShipConfiguration per hull upgrade variant.
    for (key, value) in hulls {
        debug!("Found hull {}", key);
        let hull = parse_hull(&url, value, &vehicle_components);
        configs.push(hull);
    }
    let name = v["name"].as_str().unwrap();
    let class = v["class"].as_str().unwrap();
    let class = if class == "destroyer" {
        ShipClass::Destroyer
    } else if class == "cruiser" {
        ShipClass::Cruiser
    } else if class == "battleship" {
        ShipClass::Battleship
    } else if class == "aircarrier" {
        ShipClass::AircraftCarrier
    } else if class == "auxiliary" || class == "submarine" {
        // Ignore these
        return None;
    } else {
        error!("Found unknown ship class {} for {}", class, name);
        panic!();
    };
    Some(Ship::new(
        configs,
        v["level"].as_i64().unwrap().try_into().unwrap(),
        v["name"].as_str().unwrap().to_string(),
        class,
    ))
}
|
find_size
|
identifier_name
|
ship_parser.rs
|
use log::{error, warn, debug, trace};
use serde_json::Value;
use serde_json::map::Map;
use crate::ballistics::{Ballistics, Dispersion};
use crate::gun::*;
use crate::download::{download, download_with_params};
use serde_derive::Deserialize;
use std::collections::HashMap;
use cgmath::{Matrix4, Point3};
use std::io::prelude::*;
use std::convert::TryInto;
fn parse_ballistics(ammo: &Map<String, Value>) -> Ballistics {
Ballistics::new(
ammo["bulletMass"].as_f64().unwrap(),
ammo["bulletDiametr"].as_f64().unwrap(),
ammo["bulletSpeed"].as_f64().unwrap(),
ammo["bulletAirDrag"].as_f64().unwrap(),
ammo["bulletKrupp"].as_f64().unwrap()
)
}
fn parse_ammotype(ammo: &Map<String, Value>) -> Ammo {
let ammotype = ammo["ammoType"].as_str().expect("Couldn't find ammoType");
debug!("Found ammo of type {}", ammotype);
let ballistics = parse_ballistics(ammo);
if ammotype == "HE" {
Ammo::new(
AmmoType::He(HeAmmo::new(
ammo["alphaDamage"].as_f64().expect("Couldn't find alphaDamage"),
ammo["alphaPiercingHE"].as_f64().expect("Couldn't find alphaPiercingHE"),
)),
ballistics,
)
} else if ammotype == "AP" {
Ammo::new(
AmmoType::Ap(ApAmmo::new(
ammo["bulletDiametr"].as_f64().expect("Couldn't find bulletDiametr"),
ammo["alphaDamage"].as_f64().expect("Couldn't find alphaDamage"),
ammo["bulletDetonator"].as_f64().expect("Couldn't find bulletDetonator"),
ammo["bulletDetonatorThreshold"].as_f64().expect("Couldn't find bulletDetonatorThreshold"),
)),
ballistics,
)
} else if ammotype == "CS" {
warn!("Found unimplemented ammo type CS!");
Ammo::new(
AmmoType::He(HeAmmo::new(1.0, 1.0)), ballistics)
} else {
error!("Found unknown ammo type {}!", ammotype);
panic!()
}
}
fn parse_artillery(artillery_spec: &Map<String, Value>) -> Vec<Gun> {
//debug!("{:#?}", artillery_spec);
let guns = artillery_spec["guns"].as_object().unwrap();
/*for (key,gun) in guns {
debug!("{}: {:?}", key, gun);
}*/
let dispersion = Dispersion::new(
artillery_spec["minDistH"].as_f64().expect("Couldn't find horizontal"),
artillery_spec["minDistV"].as_f64().expect("Couldn't find vertical"),
artillery_spec["maxDist"].as_f64().expect("Couldn't find maxrange"),
artillery_spec["sigmaCount"].as_f64().expect("Couldn't find sigmaCount")
);
guns.iter().map(|(_, gun)| {
let ammo_list = gun["ammoList"].as_object().expect("Couldn't get ammoList");
//debug!("{}: {:#?}", key, gun);
let ammo: Vec<_> = ammo_list.iter().map(|(_, ammo)| {
parse_ammotype(ammo.as_object().unwrap())
}).collect();
Gun::new(
dispersion.clone(),
ammo,
)
}).collect()
}
#[derive(Deserialize)]
struct ArmorMaterial {
#[serde(alias = "type")]
armor_type: usize,
thickness: usize, // mm
}
#[derive(Deserialize)]
struct ArmorGroup {
material: String,
indices: Vec<usize>,
}
#[derive(Deserialize)]
struct ArmorObject {
vertices: Vec<f64>,
groups: Vec<ArmorGroup>,
}
#[derive(Deserialize)]
struct GeometryObject {
armor: ArmorObject,
}
#[derive(Deserialize)]
struct RawGeometry {
objects: GeometryObject,
materials: HashMap<String, ArmorMaterial>,
}
impl RawGeometry {
pub fn to_armor_faces(self, matrix: Matrix4<f64>) -> Vec<ArmorFace> {
let mut vertices = vec!();
for i in 0..self.objects.armor.vertices.len()/3 {
let pnt = Point3::new(
self.objects.armor.vertices[i*3+0],
self.objects.armor.vertices[i*3+1],
self.objects.armor.vertices[i*3+2],
);
let pnt = Point3::from_homogeneous(matrix * pnt.to_homogeneous());
vertices.push(Point3::new(pnt.x, pnt.y, pnt.z));
}
let mut faces = vec!();
for group in self.objects.armor.groups {
let material = &self.materials[&group.material];
for i in 0..group.indices.len()/3 {
faces.push(ArmorFace::new(
[
vertices[group.indices[i*3+0]],
vertices[group.indices[i*3+1]],
vertices[group.indices[i*3+2]],
],
material.thickness as f64,
ArmorType::from_id(material.armor_type),
));
}
}
faces
}
}
fn parse_armor(url: &str, hull_components: &Map<String, Value>) -> Vec<ArmorFace> {
let mut params = Map::new();
for (k,v) in hull_components {
debug!("Hull has component {}: {}", k, v);
params.insert(k.to_string(), v[0].clone());
}
let page = download_with_params(&url, "armor", &Value::Object(params).to_string());
let scheme: Vec<_> = page.lines().filter(|line| {
line.contains("var scheme")
}).collect();
if scheme.len()!= 1 {
error!("Expected to find exactly one scheme variable! Found {}", scheme.len());
panic!();
}
let armor = scheme[0].split("=").skip(1).collect::<Vec<_>>().join("=");
let armor: Value = serde_json::from_str(&armor[1..armor.len()-1]).unwrap();
let mut faces = vec!();
for (_,v) in armor.as_object().unwrap() {
let url = format!("https://gamemodels3d.com/games/worldofwarships/data/current/armor/{}", v["model"].as_str().unwrap());
let model = download(&url);
if model.len() == 0 {
// Sometimes we get 404 for some reason
continue;
}
let mut m = [0.0; 16];
let transform = v["transform"].as_array().unwrap();
for i in 0..4 {
let col = transform[i].as_array().unwrap();
for j in 0..4 {
m[i*4 + j] = col[j].as_f64().expect(&format!("Couldn't get {}th element of column {}", j, i));
}
}
let m = Matrix4::new(
m[0*4 + 0],
m[0*4 + 1],
m[0*4 + 2],
m[0*4 + 3],
m[1*4 + 0],
m[1*4 + 1],
m[1*4 + 2],
m[1*4 + 3],
m[2*4 + 0],
m[2*4 + 1],
m[2*4 + 2],
m[2*4 + 3],
m[3*4 + 0],
m[3*4 + 1],
m[3*4 + 2],
m[3*4 + 3],
);
//debug!("Got matrix: {:?}", m);
let geometry: RawGeometry = serde_json::from_str(&model).unwrap();
faces.append(&mut geometry.to_armor_faces(m));
}
debug!("Mesh has {} faces", faces.len());
// Get the bounding box
let mins = [
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.x}).fold(1./0., f64::min),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.y}).fold(1./0., f64::min),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.z}).fold(1./0., f64::min),
];
let maxs = [
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.x}).fold(-1./0., f64::max),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.y}).fold(-1./0., f64::max),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.z}).fold(-1./0., f64::max),
];
debug!("Bounding box: {:?} to {:?}", mins, maxs);
// Dump the mesh as a.obj to debug
{
let mut f = std::fs::File::create("test.obj").unwrap();
for face in faces.iter() {
for v in face.vertices.iter() {
f.write_all(format!("v {} {} {}\n", v.x, v.y, v.z).as_bytes()).unwrap();
}
}
for i in 0..faces.len() {
f.write_all(format!("f {} {} {}\n", i*3+1, i*3+2, i*3+3).as_bytes()).unwrap();
|
}
faces
}
/// Compute the axis-aligned extents `[dx, dy, dz]` of a face list.
///
/// Makes a single pass over all vertices instead of six separate iterator
/// chains; the result is identical (including NaN handling, since the same
/// `f64::min`/`f64::max` semantics apply).
fn find_size(faces: &Vec<ArmorFace>) -> [f64; 3] {
    let mut lo = [f64::INFINITY; 3];
    let mut hi = [f64::NEG_INFINITY; 3];
    for face in faces.iter() {
        for v in face.vertices.iter() {
            let coords = [v.x, v.y, v.z];
            for axis in 0..3 {
                lo[axis] = lo[axis].min(coords[axis]);
                hi[axis] = hi[axis].max(coords[axis]);
            }
        }
    }
    [
        hi[0] - lo[0],
        hi[1] - lo[1],
        hi[2] - lo[2],
    ]
}
fn parse_hull(url: &str, ship_spec: &Value, components: &Map<String, Value>) -> ShipConfiguration {
let hull_spec = ship_spec["components"].as_object().unwrap();
for (key, value) in hull_spec {
debug!("Found component {}: {}", key, value);
}
let hull = &components[hull_spec["hull"].as_array().unwrap()[0].as_str().unwrap()];
let max_speed = hull["maxSpeed"].as_f64().unwrap() / 1.944; // Scaling factor to get m/s, as far as I can tell
let name = hull["name"].as_str().unwrap();
let artillery = if hull_spec.contains_key("artillery") {
let artillery = &hull_spec["artillery"];
let artillery = artillery.as_array().unwrap();
if artillery.len()!= 1 {
warn!("Found an artillery of length {}!", artillery.len());
}
let artillery = &artillery[0];
debug!("Parsing artillery: {:?}", artillery);
parse_artillery(components[artillery.as_str().unwrap()].as_object().unwrap())
} else {
vec!()
};
let geometry = parse_armor(url, hull_spec);
let size = find_size(&geometry);
let length = size[2] * 1.53; // Scaling factor to get meters
ShipConfiguration::new(
artillery,
geometry,
max_speed,
length,
name.to_string(),
)
}
pub fn download_vehicle(vehicle_id: &str) -> Option<Ship> {
trace!("Downloading vehicle_id={}", vehicle_id);
let url = format!("https://gamemodels3d.com/games/worldofwarships/vehicles/{}", vehicle_id);
let page = download(&url);
let vehicle: Vec<_> = page.lines().filter(|line| {
line.contains("var _vehicle")
}).collect();
if vehicle.len()!= 1 {
panic!("Expected vehicle length to be 1!");
}
let spec = vehicle[0].split("=").skip(1).collect::<Vec<_>>().join("=");
//println!("Spec: {}", spec);
let v: Value = serde_json::from_str(&spec[1..spec.len()-1]).unwrap();
let vehicle_components = v["Components"].as_object().unwrap();
let hulls = v["ShipUpgradeInfo"]["_Hull"].as_object().unwrap();
let mut configs = vec!();
for (key, value) in hulls {
debug!("Found hull {}", key);
let hull = parse_hull(&url, value, &vehicle_components);
configs.push(hull);
}
let name = v["name"].as_str().unwrap();
let class = v["class"].as_str().unwrap();
let class = if class == "destroyer" {
ShipClass::Destroyer
} else if class == "cruiser" {
ShipClass::Cruiser
} else if class == "battleship" {
ShipClass::Battleship
} else if class == "aircarrier" {
ShipClass::AircraftCarrier
} else if class == "auxiliary" || class == "submarine" {
// Ignore these
return None;
} else {
error!("Found unknown ship class {} for {}", class, name);
panic!();
};
Some(Ship::new(
configs,
v["level"].as_i64().unwrap().try_into().unwrap(),
v["name"].as_str().unwrap().to_string(),
class,
))
}
|
}
|
random_line_split
|
ship_parser.rs
|
use log::{error, warn, debug, trace};
use serde_json::Value;
use serde_json::map::Map;
use crate::ballistics::{Ballistics, Dispersion};
use crate::gun::*;
use crate::download::{download, download_with_params};
use serde_derive::Deserialize;
use std::collections::HashMap;
use cgmath::{Matrix4, Point3};
use std::io::prelude::*;
use std::convert::TryInto;
fn parse_ballistics(ammo: &Map<String, Value>) -> Ballistics {
Ballistics::new(
ammo["bulletMass"].as_f64().unwrap(),
ammo["bulletDiametr"].as_f64().unwrap(),
ammo["bulletSpeed"].as_f64().unwrap(),
ammo["bulletAirDrag"].as_f64().unwrap(),
ammo["bulletKrupp"].as_f64().unwrap()
)
}
fn parse_ammotype(ammo: &Map<String, Value>) -> Ammo {
let ammotype = ammo["ammoType"].as_str().expect("Couldn't find ammoType");
debug!("Found ammo of type {}", ammotype);
let ballistics = parse_ballistics(ammo);
if ammotype == "HE"
|
else if ammotype == "AP" {
Ammo::new(
AmmoType::Ap(ApAmmo::new(
ammo["bulletDiametr"].as_f64().expect("Couldn't find bulletDiametr"),
ammo["alphaDamage"].as_f64().expect("Couldn't find alphaDamage"),
ammo["bulletDetonator"].as_f64().expect("Couldn't find bulletDetonator"),
ammo["bulletDetonatorThreshold"].as_f64().expect("Couldn't find bulletDetonatorThreshold"),
)),
ballistics,
)
} else if ammotype == "CS" {
warn!("Found unimplemented ammo type CS!");
Ammo::new(
AmmoType::He(HeAmmo::new(1.0, 1.0)), ballistics)
} else {
error!("Found unknown ammo type {}!", ammotype);
panic!()
}
}
fn parse_artillery(artillery_spec: &Map<String, Value>) -> Vec<Gun> {
//debug!("{:#?}", artillery_spec);
let guns = artillery_spec["guns"].as_object().unwrap();
/*for (key,gun) in guns {
debug!("{}: {:?}", key, gun);
}*/
let dispersion = Dispersion::new(
artillery_spec["minDistH"].as_f64().expect("Couldn't find horizontal"),
artillery_spec["minDistV"].as_f64().expect("Couldn't find vertical"),
artillery_spec["maxDist"].as_f64().expect("Couldn't find maxrange"),
artillery_spec["sigmaCount"].as_f64().expect("Couldn't find sigmaCount")
);
guns.iter().map(|(_, gun)| {
let ammo_list = gun["ammoList"].as_object().expect("Couldn't get ammoList");
//debug!("{}: {:#?}", key, gun);
let ammo: Vec<_> = ammo_list.iter().map(|(_, ammo)| {
parse_ammotype(ammo.as_object().unwrap())
}).collect();
Gun::new(
dispersion.clone(),
ammo,
)
}).collect()
}
#[derive(Deserialize)]
struct ArmorMaterial {
#[serde(alias = "type")]
armor_type: usize,
thickness: usize, // mm
}
#[derive(Deserialize)]
struct ArmorGroup {
material: String,
indices: Vec<usize>,
}
#[derive(Deserialize)]
struct ArmorObject {
vertices: Vec<f64>,
groups: Vec<ArmorGroup>,
}
#[derive(Deserialize)]
struct GeometryObject {
armor: ArmorObject,
}
#[derive(Deserialize)]
struct RawGeometry {
objects: GeometryObject,
materials: HashMap<String, ArmorMaterial>,
}
impl RawGeometry {
pub fn to_armor_faces(self, matrix: Matrix4<f64>) -> Vec<ArmorFace> {
let mut vertices = vec!();
for i in 0..self.objects.armor.vertices.len()/3 {
let pnt = Point3::new(
self.objects.armor.vertices[i*3+0],
self.objects.armor.vertices[i*3+1],
self.objects.armor.vertices[i*3+2],
);
let pnt = Point3::from_homogeneous(matrix * pnt.to_homogeneous());
vertices.push(Point3::new(pnt.x, pnt.y, pnt.z));
}
let mut faces = vec!();
for group in self.objects.armor.groups {
let material = &self.materials[&group.material];
for i in 0..group.indices.len()/3 {
faces.push(ArmorFace::new(
[
vertices[group.indices[i*3+0]],
vertices[group.indices[i*3+1]],
vertices[group.indices[i*3+2]],
],
material.thickness as f64,
ArmorType::from_id(material.armor_type),
));
}
}
faces
}
}
fn parse_armor(url: &str, hull_components: &Map<String, Value>) -> Vec<ArmorFace> {
let mut params = Map::new();
for (k,v) in hull_components {
debug!("Hull has component {}: {}", k, v);
params.insert(k.to_string(), v[0].clone());
}
let page = download_with_params(&url, "armor", &Value::Object(params).to_string());
let scheme: Vec<_> = page.lines().filter(|line| {
line.contains("var scheme")
}).collect();
if scheme.len()!= 1 {
error!("Expected to find exactly one scheme variable! Found {}", scheme.len());
panic!();
}
let armor = scheme[0].split("=").skip(1).collect::<Vec<_>>().join("=");
let armor: Value = serde_json::from_str(&armor[1..armor.len()-1]).unwrap();
let mut faces = vec!();
for (_,v) in armor.as_object().unwrap() {
let url = format!("https://gamemodels3d.com/games/worldofwarships/data/current/armor/{}", v["model"].as_str().unwrap());
let model = download(&url);
if model.len() == 0 {
// Sometimes we get 404 for some reason
continue;
}
let mut m = [0.0; 16];
let transform = v["transform"].as_array().unwrap();
for i in 0..4 {
let col = transform[i].as_array().unwrap();
for j in 0..4 {
m[i*4 + j] = col[j].as_f64().expect(&format!("Couldn't get {}th element of column {}", j, i));
}
}
let m = Matrix4::new(
m[0*4 + 0],
m[0*4 + 1],
m[0*4 + 2],
m[0*4 + 3],
m[1*4 + 0],
m[1*4 + 1],
m[1*4 + 2],
m[1*4 + 3],
m[2*4 + 0],
m[2*4 + 1],
m[2*4 + 2],
m[2*4 + 3],
m[3*4 + 0],
m[3*4 + 1],
m[3*4 + 2],
m[3*4 + 3],
);
//debug!("Got matrix: {:?}", m);
let geometry: RawGeometry = serde_json::from_str(&model).unwrap();
faces.append(&mut geometry.to_armor_faces(m));
}
debug!("Mesh has {} faces", faces.len());
// Get the bounding box
let mins = [
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.x}).fold(1./0., f64::min),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.y}).fold(1./0., f64::min),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.z}).fold(1./0., f64::min),
];
let maxs = [
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.x}).fold(-1./0., f64::max),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.y}).fold(-1./0., f64::max),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.z}).fold(-1./0., f64::max),
];
debug!("Bounding box: {:?} to {:?}", mins, maxs);
// Dump the mesh as a.obj to debug
{
let mut f = std::fs::File::create("test.obj").unwrap();
for face in faces.iter() {
for v in face.vertices.iter() {
f.write_all(format!("v {} {} {}\n", v.x, v.y, v.z).as_bytes()).unwrap();
}
}
for i in 0..faces.len() {
f.write_all(format!("f {} {} {}\n", i*3+1, i*3+2, i*3+3).as_bytes()).unwrap();
}
}
faces
}
fn find_size(faces: &Vec<ArmorFace>) -> [f64; 3] {
let mins = [
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.x}).fold(1./0., f64::min),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.y}).fold(1./0., f64::min),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.z}).fold(1./0., f64::min),
];
let maxs = [
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.x}).fold(-1./0., f64::max),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.y}).fold(-1./0., f64::max),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.z}).fold(-1./0., f64::max),
];
[
maxs[0] - mins[0],
maxs[1] - mins[1],
maxs[2] - mins[2],
]
}
fn parse_hull(url: &str, ship_spec: &Value, components: &Map<String, Value>) -> ShipConfiguration {
let hull_spec = ship_spec["components"].as_object().unwrap();
for (key, value) in hull_spec {
debug!("Found component {}: {}", key, value);
}
let hull = &components[hull_spec["hull"].as_array().unwrap()[0].as_str().unwrap()];
let max_speed = hull["maxSpeed"].as_f64().unwrap() / 1.944; // Scaling factor to get m/s, as far as I can tell
let name = hull["name"].as_str().unwrap();
let artillery = if hull_spec.contains_key("artillery") {
let artillery = &hull_spec["artillery"];
let artillery = artillery.as_array().unwrap();
if artillery.len()!= 1 {
warn!("Found an artillery of length {}!", artillery.len());
}
let artillery = &artillery[0];
debug!("Parsing artillery: {:?}", artillery);
parse_artillery(components[artillery.as_str().unwrap()].as_object().unwrap())
} else {
vec!()
};
let geometry = parse_armor(url, hull_spec);
let size = find_size(&geometry);
let length = size[2] * 1.53; // Scaling factor to get meters
ShipConfiguration::new(
artillery,
geometry,
max_speed,
length,
name.to_string(),
)
}
pub fn download_vehicle(vehicle_id: &str) -> Option<Ship> {
trace!("Downloading vehicle_id={}", vehicle_id);
let url = format!("https://gamemodels3d.com/games/worldofwarships/vehicles/{}", vehicle_id);
let page = download(&url);
let vehicle: Vec<_> = page.lines().filter(|line| {
line.contains("var _vehicle")
}).collect();
if vehicle.len()!= 1 {
panic!("Expected vehicle length to be 1!");
}
let spec = vehicle[0].split("=").skip(1).collect::<Vec<_>>().join("=");
//println!("Spec: {}", spec);
let v: Value = serde_json::from_str(&spec[1..spec.len()-1]).unwrap();
let vehicle_components = v["Components"].as_object().unwrap();
let hulls = v["ShipUpgradeInfo"]["_Hull"].as_object().unwrap();
let mut configs = vec!();
for (key, value) in hulls {
debug!("Found hull {}", key);
let hull = parse_hull(&url, value, &vehicle_components);
configs.push(hull);
}
let name = v["name"].as_str().unwrap();
let class = v["class"].as_str().unwrap();
let class = if class == "destroyer" {
ShipClass::Destroyer
} else if class == "cruiser" {
ShipClass::Cruiser
} else if class == "battleship" {
ShipClass::Battleship
} else if class == "aircarrier" {
ShipClass::AircraftCarrier
} else if class == "auxiliary" || class == "submarine" {
// Ignore these
return None;
} else {
error!("Found unknown ship class {} for {}", class, name);
panic!();
};
Some(Ship::new(
configs,
v["level"].as_i64().unwrap().try_into().unwrap(),
v["name"].as_str().unwrap().to_string(),
class,
))
}
|
{
Ammo::new(
AmmoType::He(HeAmmo::new(
ammo["alphaDamage"].as_f64().expect("Couldn't find alphaDamage"),
ammo["alphaPiercingHE"].as_f64().expect("Couldn't find alphaPiercingHE"),
)),
ballistics,
)
}
|
conditional_block
|
ship_parser.rs
|
use log::{error, warn, debug, trace};
use serde_json::Value;
use serde_json::map::Map;
use crate::ballistics::{Ballistics, Dispersion};
use crate::gun::*;
use crate::download::{download, download_with_params};
use serde_derive::Deserialize;
use std::collections::HashMap;
use cgmath::{Matrix4, Point3};
use std::io::prelude::*;
use std::convert::TryInto;
fn parse_ballistics(ammo: &Map<String, Value>) -> Ballistics {
Ballistics::new(
ammo["bulletMass"].as_f64().unwrap(),
ammo["bulletDiametr"].as_f64().unwrap(),
ammo["bulletSpeed"].as_f64().unwrap(),
ammo["bulletAirDrag"].as_f64().unwrap(),
ammo["bulletKrupp"].as_f64().unwrap()
)
}
fn parse_ammotype(ammo: &Map<String, Value>) -> Ammo {
let ammotype = ammo["ammoType"].as_str().expect("Couldn't find ammoType");
debug!("Found ammo of type {}", ammotype);
let ballistics = parse_ballistics(ammo);
if ammotype == "HE" {
Ammo::new(
AmmoType::He(HeAmmo::new(
ammo["alphaDamage"].as_f64().expect("Couldn't find alphaDamage"),
ammo["alphaPiercingHE"].as_f64().expect("Couldn't find alphaPiercingHE"),
)),
ballistics,
)
} else if ammotype == "AP" {
Ammo::new(
AmmoType::Ap(ApAmmo::new(
ammo["bulletDiametr"].as_f64().expect("Couldn't find bulletDiametr"),
ammo["alphaDamage"].as_f64().expect("Couldn't find alphaDamage"),
ammo["bulletDetonator"].as_f64().expect("Couldn't find bulletDetonator"),
ammo["bulletDetonatorThreshold"].as_f64().expect("Couldn't find bulletDetonatorThreshold"),
)),
ballistics,
)
} else if ammotype == "CS" {
warn!("Found unimplemented ammo type CS!");
Ammo::new(
AmmoType::He(HeAmmo::new(1.0, 1.0)), ballistics)
} else {
error!("Found unknown ammo type {}!", ammotype);
panic!()
}
}
fn parse_artillery(artillery_spec: &Map<String, Value>) -> Vec<Gun>
|
ammo,
)
}).collect()
}
#[derive(Deserialize)]
struct ArmorMaterial {
#[serde(alias = "type")]
armor_type: usize,
thickness: usize, // mm
}
#[derive(Deserialize)]
struct ArmorGroup {
material: String,
indices: Vec<usize>,
}
#[derive(Deserialize)]
struct ArmorObject {
vertices: Vec<f64>,
groups: Vec<ArmorGroup>,
}
#[derive(Deserialize)]
struct GeometryObject {
armor: ArmorObject,
}
#[derive(Deserialize)]
struct RawGeometry {
objects: GeometryObject,
materials: HashMap<String, ArmorMaterial>,
}
impl RawGeometry {
pub fn to_armor_faces(self, matrix: Matrix4<f64>) -> Vec<ArmorFace> {
let mut vertices = vec!();
for i in 0..self.objects.armor.vertices.len()/3 {
let pnt = Point3::new(
self.objects.armor.vertices[i*3+0],
self.objects.armor.vertices[i*3+1],
self.objects.armor.vertices[i*3+2],
);
let pnt = Point3::from_homogeneous(matrix * pnt.to_homogeneous());
vertices.push(Point3::new(pnt.x, pnt.y, pnt.z));
}
let mut faces = vec!();
for group in self.objects.armor.groups {
let material = &self.materials[&group.material];
for i in 0..group.indices.len()/3 {
faces.push(ArmorFace::new(
[
vertices[group.indices[i*3+0]],
vertices[group.indices[i*3+1]],
vertices[group.indices[i*3+2]],
],
material.thickness as f64,
ArmorType::from_id(material.armor_type),
));
}
}
faces
}
}
fn parse_armor(url: &str, hull_components: &Map<String, Value>) -> Vec<ArmorFace> {
let mut params = Map::new();
for (k,v) in hull_components {
debug!("Hull has component {}: {}", k, v);
params.insert(k.to_string(), v[0].clone());
}
let page = download_with_params(&url, "armor", &Value::Object(params).to_string());
let scheme: Vec<_> = page.lines().filter(|line| {
line.contains("var scheme")
}).collect();
if scheme.len()!= 1 {
error!("Expected to find exactly one scheme variable! Found {}", scheme.len());
panic!();
}
let armor = scheme[0].split("=").skip(1).collect::<Vec<_>>().join("=");
let armor: Value = serde_json::from_str(&armor[1..armor.len()-1]).unwrap();
let mut faces = vec!();
for (_,v) in armor.as_object().unwrap() {
let url = format!("https://gamemodels3d.com/games/worldofwarships/data/current/armor/{}", v["model"].as_str().unwrap());
let model = download(&url);
if model.len() == 0 {
// Sometimes we get 404 for some reason
continue;
}
let mut m = [0.0; 16];
let transform = v["transform"].as_array().unwrap();
for i in 0..4 {
let col = transform[i].as_array().unwrap();
for j in 0..4 {
m[i*4 + j] = col[j].as_f64().expect(&format!("Couldn't get {}th element of column {}", j, i));
}
}
let m = Matrix4::new(
m[0*4 + 0],
m[0*4 + 1],
m[0*4 + 2],
m[0*4 + 3],
m[1*4 + 0],
m[1*4 + 1],
m[1*4 + 2],
m[1*4 + 3],
m[2*4 + 0],
m[2*4 + 1],
m[2*4 + 2],
m[2*4 + 3],
m[3*4 + 0],
m[3*4 + 1],
m[3*4 + 2],
m[3*4 + 3],
);
//debug!("Got matrix: {:?}", m);
let geometry: RawGeometry = serde_json::from_str(&model).unwrap();
faces.append(&mut geometry.to_armor_faces(m));
}
debug!("Mesh has {} faces", faces.len());
// Get the bounding box
let mins = [
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.x}).fold(1./0., f64::min),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.y}).fold(1./0., f64::min),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.z}).fold(1./0., f64::min),
];
let maxs = [
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.x}).fold(-1./0., f64::max),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.y}).fold(-1./0., f64::max),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.z}).fold(-1./0., f64::max),
];
debug!("Bounding box: {:?} to {:?}", mins, maxs);
// Dump the mesh as a.obj to debug
{
let mut f = std::fs::File::create("test.obj").unwrap();
for face in faces.iter() {
for v in face.vertices.iter() {
f.write_all(format!("v {} {} {}\n", v.x, v.y, v.z).as_bytes()).unwrap();
}
}
for i in 0..faces.len() {
f.write_all(format!("f {} {} {}\n", i*3+1, i*3+2, i*3+3).as_bytes()).unwrap();
}
}
faces
}
fn find_size(faces: &Vec<ArmorFace>) -> [f64; 3] {
let mins = [
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.x}).fold(1./0., f64::min),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.y}).fold(1./0., f64::min),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.z}).fold(1./0., f64::min),
];
let maxs = [
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.x}).fold(-1./0., f64::max),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.y}).fold(-1./0., f64::max),
faces.iter().map(|face| { face.vertices.iter() }).flatten().map(|p| {p.z}).fold(-1./0., f64::max),
];
[
maxs[0] - mins[0],
maxs[1] - mins[1],
maxs[2] - mins[2],
]
}
fn parse_hull(url: &str, ship_spec: &Value, components: &Map<String, Value>) -> ShipConfiguration {
let hull_spec = ship_spec["components"].as_object().unwrap();
for (key, value) in hull_spec {
debug!("Found component {}: {}", key, value);
}
let hull = &components[hull_spec["hull"].as_array().unwrap()[0].as_str().unwrap()];
let max_speed = hull["maxSpeed"].as_f64().unwrap() / 1.944; // Scaling factor to get m/s, as far as I can tell
let name = hull["name"].as_str().unwrap();
let artillery = if hull_spec.contains_key("artillery") {
let artillery = &hull_spec["artillery"];
let artillery = artillery.as_array().unwrap();
if artillery.len()!= 1 {
warn!("Found an artillery of length {}!", artillery.len());
}
let artillery = &artillery[0];
debug!("Parsing artillery: {:?}", artillery);
parse_artillery(components[artillery.as_str().unwrap()].as_object().unwrap())
} else {
vec!()
};
let geometry = parse_armor(url, hull_spec);
let size = find_size(&geometry);
let length = size[2] * 1.53; // Scaling factor to get meters
ShipConfiguration::new(
artillery,
geometry,
max_speed,
length,
name.to_string(),
)
}
pub fn download_vehicle(vehicle_id: &str) -> Option<Ship> {
trace!("Downloading vehicle_id={}", vehicle_id);
let url = format!("https://gamemodels3d.com/games/worldofwarships/vehicles/{}", vehicle_id);
let page = download(&url);
let vehicle: Vec<_> = page.lines().filter(|line| {
line.contains("var _vehicle")
}).collect();
if vehicle.len()!= 1 {
panic!("Expected vehicle length to be 1!");
}
let spec = vehicle[0].split("=").skip(1).collect::<Vec<_>>().join("=");
//println!("Spec: {}", spec);
let v: Value = serde_json::from_str(&spec[1..spec.len()-1]).unwrap();
let vehicle_components = v["Components"].as_object().unwrap();
let hulls = v["ShipUpgradeInfo"]["_Hull"].as_object().unwrap();
let mut configs = vec!();
for (key, value) in hulls {
debug!("Found hull {}", key);
let hull = parse_hull(&url, value, &vehicle_components);
configs.push(hull);
}
let name = v["name"].as_str().unwrap();
let class = v["class"].as_str().unwrap();
let class = if class == "destroyer" {
ShipClass::Destroyer
} else if class == "cruiser" {
ShipClass::Cruiser
} else if class == "battleship" {
ShipClass::Battleship
} else if class == "aircarrier" {
ShipClass::AircraftCarrier
} else if class == "auxiliary" || class == "submarine" {
// Ignore these
return None;
} else {
error!("Found unknown ship class {} for {}", class, name);
panic!();
};
Some(Ship::new(
configs,
v["level"].as_i64().unwrap().try_into().unwrap(),
v["name"].as_str().unwrap().to_string(),
class,
))
}
|
{
//debug!("{:#?}", artillery_spec);
let guns = artillery_spec["guns"].as_object().unwrap();
/*for (key,gun) in guns {
debug!("{}: {:?}", key, gun);
}*/
let dispersion = Dispersion::new(
artillery_spec["minDistH"].as_f64().expect("Couldn't find horizontal"),
artillery_spec["minDistV"].as_f64().expect("Couldn't find vertical"),
artillery_spec["maxDist"].as_f64().expect("Couldn't find maxrange"),
artillery_spec["sigmaCount"].as_f64().expect("Couldn't find sigmaCount")
);
guns.iter().map(|(_, gun)| {
let ammo_list = gun["ammoList"].as_object().expect("Couldn't get ammoList");
//debug!("{}: {:#?}", key, gun);
let ammo: Vec<_> = ammo_list.iter().map(|(_, ammo)| {
parse_ammotype(ammo.as_object().unwrap())
}).collect();
Gun::new(
dispersion.clone(),
|
identifier_body
|
editor.rs
|
use rustyline_derive::{Completer, Hinter, Validator};
use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer};
use serde_json::json;
use std::{cell::RefCell, collections::HashMap, fmt::Write, ops::Range};
use tree_sitter::{Query, QueryCursor};
use tree_sitter_highlight::{HighlightConfiguration, HighlightEvent, Highlighter};
use which::which;
lazy_static! {
static ref CSS_STYLES_BY_COLOR_ID: Vec<String> =
serde_json::from_str(include_str!("../vendor/xterm-colors.json")).unwrap();
}
#[derive(Debug, Default)]
pub struct Style {
pub ansi: ansi_term::Style,
pub css: Option<String>,
}
#[derive(Debug)]
pub struct Theme {
pub styles: Vec<Style>,
pub highlight_names: Vec<String>,
}
#[derive(Default, Deserialize, Serialize)]
pub struct ThemeConfig {
#[serde(default)]
pub theme: Theme,
}
impl Theme {
/* pub fn load(path: &Path) -> std::io::Result<Self> {
let json = std::fs::read_to_string(path)?;
Ok(serde_json::from_str(&json).unwrap_or_default())
} */
pub fn default_style(&self) -> Style {
Style::default()
}
}
impl<'de> Deserialize<'de> for Theme {
fn deserialize<D>(deserializer: D) -> std::result::Result<Theme, D::Error>
where
D: Deserializer<'de>,
{
let mut styles = Vec::new();
let mut highlight_names = Vec::new();
if let Ok(colors) = HashMap::<String, serde_json::Value>::deserialize(deserializer) {
highlight_names.reserve(colors.len());
styles.reserve(colors.len());
for (name, style_value) in colors {
let mut style = Style::default();
parse_style(&mut style, style_value);
highlight_names.push(name);
styles.push(style);
}
}
Ok(Self {
styles,
highlight_names,
})
}
}
impl Serialize for Theme {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut map = serializer.serialize_map(Some(self.styles.len()))?;
for (name, style) in self.highlight_names.iter().zip(&self.styles) {
let style = &style.ansi;
let color = style.foreground.map(|color| match color {
Color::Black => json!("black"),
Color::Blue => json!("blue"),
Color::Cyan => json!("cyan"),
Color::Green => json!("green"),
Color::Purple => json!("purple"),
Color::Red => json!("red"),
Color::White => json!("white"),
Color::Yellow => json!("yellow"),
Color::RGB(r, g, b) => json!(format!("#{:x?}{:x?}{:x?}", r, g, b)),
Color::Fixed(n) => json!(n),
});
if style.is_bold || style.is_italic || style.is_underline {
let mut style_json = HashMap::new();
if let Some(color) = color {
style_json.insert("color", color);
}
if style.is_bold {
style_json.insert("bold", serde_json::Value::Bool(true));
}
if style.is_italic {
style_json.insert("italic", serde_json::Value::Bool(true));
}
if style.is_underline {
style_json.insert("underline", serde_json::Value::Bool(true));
}
map.serialize_entry(&name, &style_json)?;
} else if let Some(color) = color {
map.serialize_entry(&name, &color)?;
} else {
map.serialize_entry(&name, &serde_json::Value::Null)?;
}
}
map.end()
}
}
impl Default for Theme {
fn default() -> Self {
serde_json::from_str(
r#"
{
"attribute": {"color": 124, "italic": true},
"comment": {"color": 245, "italic": true},
"constant.builtin": {"color": 94, "bold": true},
"constant": 94,
"constructor": 136,
"embedded": null,
"function.builtin": {"color": 26, "bold": true},
"function": 26,
"keyword": 56,
"number": {"color": 94, "bold": true},
"property": 124,
"operator": {"color": 239, "bold": true},
"punctuation.bracket": 239,
"punctuation.delimiter": 239,
"string.special": 30,
"string": 28,
"tag": 18,
"type": 23,
"type.builtin": {"color": 23, "bold": true},
"variable.builtin": {"bold": true},
"variable.parameter": {"underline": true}
}
"#,
)
.unwrap()
}
}
fn parse_style(style: &mut Style, json: serde_json::Value) {
use serde_json::Value;
if let Value::Object(entries) = json {
for (property_name, value) in entries {
match property_name.as_str() {
"bold" => {
if value == Value::Bool(true) {
style.ansi = style.ansi.bold()
}
}
"italic" => {
if value == Value::Bool(true) {
style.ansi = style.ansi.italic()
}
}
"underline" => {
if value == Value::Bool(true) {
style.ansi = style.ansi.underline()
}
}
"color" => {
if let Some(color) = parse_color(value) {
style.ansi = style.ansi.fg(color);
}
}
_ => {}
}
}
style.css = Some(style_to_css(style.ansi));
} else if let Some(color) = parse_color(json) {
style.ansi = style.ansi.fg(color);
style.css = Some(style_to_css(style.ansi));
} else {
style.css = None;
}
if let Some(Color::RGB(red, green, blue)) = style.ansi.foreground {
if!terminal_supports_truecolor() {
style.ansi = style.ansi.fg(closest_xterm_color(red, green, blue));
}
}
}
fn terminal_supports_truecolor() -> bool {
use std::env;
if let Ok(truecolor) = env::var("COLORTERM") {
truecolor == "truecolor" || truecolor == "24bit"
} else {
false
}
}
fn closest_xterm_color(red: u8, green: u8, blue: u8) -> Color {
use std::cmp::{max, min};
let colors = CSS_STYLES_BY_COLOR_ID
.iter()
.enumerate()
.map(|(color_id, hex)| (color_id as u8, hex_string_to_rgb(hex).unwrap()));
// Get the xterm color with the minimum Euclidean distance to the target color
// i.e. distance = √ (r2 - r1)² + (g2 - g1)² + (b2 - b1)²
let distances = colors.map(|(color_id, (r, g, b))| {
let r_delta: u32 = (max(r, red) - min(r, red)).into();
let g_delta: u32 = (max(g, green) - min(g, green)).into();
let b_delta: u32 = (max(b, blue) - min(b, blue)).into();
let distance = r_delta.pow(2) + g_delta.pow(2) + b_delta.pow(2);
// don't need to actually take the square root for the sake of comparison
(color_id, distance)
});
Color::Fixed(distances.min_by(|(_, d1), (_, d2)| d1.cmp(d2)).unwrap().0)
}
fn parse_color(json: serde_json::Value) -> Option<Color> {
use serde_json::Value;
match json {
Value::Number(n) => n.as_u64().map(|c| Color::Fixed(c as u8)),
Value::String(s) => match s.to_lowercase().as_str() {
"black" => Some(Color::Black),
"blue" => Some(Color::Blue),
"cyan" => Some(Color::Cyan),
"green" => Some(Color::Green),
"purple" => Some(Color::Purple),
"red" => Some(Color::Red),
"white" => Some(Color::White),
"yellow" => Some(Color::Yellow),
s => {
if let Some((red, green, blue)) = hex_string_to_rgb(s) {
Some(Color::RGB(red, green, blue))
} else {
None
}
}
},
_ => None,
}
}
fn hex_string_to_rgb(s: &str) -> Option<(u8, u8, u8)> {
if s.starts_with('#') && s.len() >= 7 {
if let (Ok(red), Ok(green), Ok(blue)) = (
u8::from_str_radix(&s[1..3], 16),
u8::from_str_radix(&s[3..5], 16),
u8::from_str_radix(&s[5..7], 16),
) {
Some((red, green, blue))
} else {
None
}
} else {
None
}
}
fn style_to_css(style: ansi_term::Style) -> String {
let mut result = "style='".to_string();
if style.is_underline {
write!(&mut result, "text-decoration: underline;").unwrap();
}
if style.is_bold {
write!(&mut result, "font-weight: bold;").unwrap();
}
if style.is_italic {
write!(&mut result, "font-style: italic;").unwrap();
}
if let Some(color) = style.foreground {
write_color(&mut result, color);
}
result.push('\'');
result
}
fn write_color(buffer: &mut String, color: Color) {
if let Color::RGB(r, g, b) = &color {
write!(buffer, "color: #{:x?}{:x?}{:x?}", r, g, b).unwrap()
} else {
write!(
buffer,
"color: {}",
match color {
Color::Black => "black",
Color::Blue => "blue",
Color::Red => "red",
Color::Green => "green",
Color::Yellow => "yellow",
Color::Cyan => "cyan",
Color::Purple => "purple",
Color::White => "white",
Color::Fixed(n) => CSS_STYLES_BY_COLOR_ID[n as usize].as_str(),
Color::RGB(_, _, _) => unreachable!(),
}
)
.unwrap()
}
}
#[derive(Completer, Hinter, Validator)]
pub(crate) struct Editor {
hi: RefCell<Highlighter>,
hi_cfg: HighlightConfiguration,
hi_theme: Theme,
cmd_query: Query,
}
impl Editor {
pub fn new() -> Self {
let lang = tree_sitter_rshcmd::language();
let mut hi_cfg =
HighlightConfiguration::new(lang, tree_sitter_rshcmd::HIGHLIGHTS_QUERY, "", "")
.expect("Could not init tree sitter");
let hi_theme: Theme = Default::default();
hi_cfg.configure(&hi_theme.highlight_names);
Editor {
hi: RefCell::new(Highlighter::new()),
hi_cfg,
hi_theme,
cmd_query: Query::new(lang, r"(cmd_name (identifier) @cmd)")
.expect("error building query"),
}
}
}
struct Styling {
current: Vec<StylingChoice>,
}
struct StylingChoice {
range: Range<usize>,
style: ansi_term::Style,
prio: usize,
}
impl Styling {
fn new(_len: usize) -> Self {
Styling {
// current: vec![(0..len, (ansi_term::Style::new(), 0))],
current: Vec::new(),
}
}
fn insert(&mut self, style: ansi_term::Style, range: Range<usize>, prio: usize) {
self.current.push(StylingChoice { range, style, prio });
}
fn resolve_ranges(&self, len: usize) -> Vec<(Range<usize>, &ansi_term::Style)> {
struct StyleList<'a> {
backing: Vec<(usize, &'a ansi_term::Style, usize)>,
}
impl<'a> StyleList<'a> {
fn new<I>(i: I) -> Self
where
I: IntoIterator<Item = (usize, &'a ansi_term::Style, usize)>,
{
let mut backing: Vec<_> = i.into_iter().collect();
backing.sort_by(|a, b| b.2.cmp(&a.2));
Self { backing }
}
fn remove(&mut self, idx: usize) {
let i = self
.backing
.iter()
.enumerate()
.find(|(_, s)| s.0 == idx)
.unwrap()
.0;
self.backing.remove(i);
}
fn insert(&mut self, idx: usize, style: &'a ansi_term::Style, prio: usize) {
self.backing.push((idx, style, prio));
self.backing.sort_by(|a, b| b.2.cmp(&a.2));
}
fn current(&self) -> &'a ansi_term::Style {
self.backing[0].1
}
}
if len > 0 {
let mut start = HashMap::new();
let mut end = HashMap::new();
for (i, r) in self.current.iter().enumerate() {
start
.entry(r.range.start)
.or_insert_with(Vec::new)
.push((i, &r.style, r.prio));
end.entry(r.range.end).or_insert_with(Vec::new).push(i);
}
let mut ranges = Vec::new();
let mut rstart = 0;
let mut styles = StyleList::new(start.get(&0).unwrap().iter().copied());
for i in 1..len {
if let Some(ends) = end.get(&i) {
ranges.push((rstart..i, styles.current()));
for idx in ends {
styles.remove(*idx);
}
rstart = i;
}
if let Some(starts) = start.get(&i) {
for (idx, style, prio) in starts {
styles.insert(*idx, style, *prio);
}
}
}
ranges.push((rstart..len, styles.current()));
ranges
} else {
Vec::new()
}
}
fn paint(&self, source: &str) -> String {
let mut s = Vec::new();
for (range, style) in self.resolve_ranges(source.len()) {
style
.paint(&source.as_bytes()[range])
.write_to(&mut s)
.expect("can fail write in string?");
}
String::from_utf8(s).expect("we got UTF-8 in, hi is UTF8")
}
}
impl HiTrait for Editor {
fn highlight<'l>(&self, line: &'l str, _pos: usize) -> std::borrow::Cow<'l, str> {
let mut hi = self.hi.borrow_mut();
let events = hi
.highlight(&self.hi_cfg, line.as_bytes(), None, |_| None)
.expect("hi failed");
let mut stylings = Styling::new(line.len());
let mut style_stack = vec![self.hi_theme.default_style().ansi];
for event in events {
match event.expect("hi failure") {
HighlightEvent::HighlightStart(kind) => {
style_stack.push(self.hi_theme.styles[kind.0].ansi);
}
HighlightEvent::HighlightEnd => {
|
}
HighlightEvent::Source { start, end } => {
let style = style_stack.last().unwrap();
stylings.insert(*style, start..end, 1);
}
}
}
let parsed = hi.parser().parse(line, None);
if let Some(parsed) = parsed {
for query_match in
QueryCursor::new().matches(&self.cmd_query, parsed.root_node(), line.as_bytes())
{
for capture in query_match.captures {
let start = capture.node.start_byte();
let end = capture.node.end_byte();
let is_exec = which(&line[start..end]).is_ok();
if is_exec {
stylings.insert(ansi_term::Style::new().fg(Color::Green), start..end, 2);
} else {
stylings.insert(ansi_term::Style::new().fg(Color::Red), start..end, 2);
}
}
}
|
style_stack.pop();
|
random_line_split
|
editor.rs
|
ustyline_derive::{Completer, Hinter, Validator};
use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer};
use serde_json::json;
use std::{cell::RefCell, collections::HashMap, fmt::Write, ops::Range};
use tree_sitter::{Query, QueryCursor};
use tree_sitter_highlight::{HighlightConfiguration, HighlightEvent, Highlighter};
use which::which;
lazy_static! {
static ref CSS_STYLES_BY_COLOR_ID: Vec<String> =
serde_json::from_str(include_str!("../vendor/xterm-colors.json")).unwrap();
}
#[derive(Debug, Default)]
pub struct
|
{
pub ansi: ansi_term::Style,
pub css: Option<String>,
}
#[derive(Debug)]
pub struct Theme {
pub styles: Vec<Style>,
pub highlight_names: Vec<String>,
}
#[derive(Default, Deserialize, Serialize)]
pub struct ThemeConfig {
#[serde(default)]
pub theme: Theme,
}
impl Theme {
/* pub fn load(path: &Path) -> std::io::Result<Self> {
let json = std::fs::read_to_string(path)?;
Ok(serde_json::from_str(&json).unwrap_or_default())
} */
pub fn default_style(&self) -> Style {
Style::default()
}
}
impl<'de> Deserialize<'de> for Theme {
fn deserialize<D>(deserializer: D) -> std::result::Result<Theme, D::Error>
where
D: Deserializer<'de>,
{
let mut styles = Vec::new();
let mut highlight_names = Vec::new();
if let Ok(colors) = HashMap::<String, serde_json::Value>::deserialize(deserializer) {
highlight_names.reserve(colors.len());
styles.reserve(colors.len());
for (name, style_value) in colors {
let mut style = Style::default();
parse_style(&mut style, style_value);
highlight_names.push(name);
styles.push(style);
}
}
Ok(Self {
styles,
highlight_names,
})
}
}
impl Serialize for Theme {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut map = serializer.serialize_map(Some(self.styles.len()))?;
for (name, style) in self.highlight_names.iter().zip(&self.styles) {
let style = &style.ansi;
let color = style.foreground.map(|color| match color {
Color::Black => json!("black"),
Color::Blue => json!("blue"),
Color::Cyan => json!("cyan"),
Color::Green => json!("green"),
Color::Purple => json!("purple"),
Color::Red => json!("red"),
Color::White => json!("white"),
Color::Yellow => json!("yellow"),
Color::RGB(r, g, b) => json!(format!("#{:x?}{:x?}{:x?}", r, g, b)),
Color::Fixed(n) => json!(n),
});
if style.is_bold || style.is_italic || style.is_underline {
let mut style_json = HashMap::new();
if let Some(color) = color {
style_json.insert("color", color);
}
if style.is_bold {
style_json.insert("bold", serde_json::Value::Bool(true));
}
if style.is_italic {
style_json.insert("italic", serde_json::Value::Bool(true));
}
if style.is_underline {
style_json.insert("underline", serde_json::Value::Bool(true));
}
map.serialize_entry(&name, &style_json)?;
} else if let Some(color) = color {
map.serialize_entry(&name, &color)?;
} else {
map.serialize_entry(&name, &serde_json::Value::Null)?;
}
}
map.end()
}
}
impl Default for Theme {
fn default() -> Self {
serde_json::from_str(
r#"
{
"attribute": {"color": 124, "italic": true},
"comment": {"color": 245, "italic": true},
"constant.builtin": {"color": 94, "bold": true},
"constant": 94,
"constructor": 136,
"embedded": null,
"function.builtin": {"color": 26, "bold": true},
"function": 26,
"keyword": 56,
"number": {"color": 94, "bold": true},
"property": 124,
"operator": {"color": 239, "bold": true},
"punctuation.bracket": 239,
"punctuation.delimiter": 239,
"string.special": 30,
"string": 28,
"tag": 18,
"type": 23,
"type.builtin": {"color": 23, "bold": true},
"variable.builtin": {"bold": true},
"variable.parameter": {"underline": true}
}
"#,
)
.unwrap()
}
}
fn parse_style(style: &mut Style, json: serde_json::Value) {
use serde_json::Value;
if let Value::Object(entries) = json {
for (property_name, value) in entries {
match property_name.as_str() {
"bold" => {
if value == Value::Bool(true) {
style.ansi = style.ansi.bold()
}
}
"italic" => {
if value == Value::Bool(true) {
style.ansi = style.ansi.italic()
}
}
"underline" => {
if value == Value::Bool(true) {
style.ansi = style.ansi.underline()
}
}
"color" => {
if let Some(color) = parse_color(value) {
style.ansi = style.ansi.fg(color);
}
}
_ => {}
}
}
style.css = Some(style_to_css(style.ansi));
} else if let Some(color) = parse_color(json) {
style.ansi = style.ansi.fg(color);
style.css = Some(style_to_css(style.ansi));
} else {
style.css = None;
}
if let Some(Color::RGB(red, green, blue)) = style.ansi.foreground {
if!terminal_supports_truecolor() {
style.ansi = style.ansi.fg(closest_xterm_color(red, green, blue));
}
}
}
fn terminal_supports_truecolor() -> bool {
use std::env;
if let Ok(truecolor) = env::var("COLORTERM") {
truecolor == "truecolor" || truecolor == "24bit"
} else {
false
}
}
fn closest_xterm_color(red: u8, green: u8, blue: u8) -> Color {
use std::cmp::{max, min};
let colors = CSS_STYLES_BY_COLOR_ID
.iter()
.enumerate()
.map(|(color_id, hex)| (color_id as u8, hex_string_to_rgb(hex).unwrap()));
// Get the xterm color with the minimum Euclidean distance to the target color
// i.e. distance = √ (r2 - r1)² + (g2 - g1)² + (b2 - b1)²
let distances = colors.map(|(color_id, (r, g, b))| {
let r_delta: u32 = (max(r, red) - min(r, red)).into();
let g_delta: u32 = (max(g, green) - min(g, green)).into();
let b_delta: u32 = (max(b, blue) - min(b, blue)).into();
let distance = r_delta.pow(2) + g_delta.pow(2) + b_delta.pow(2);
// don't need to actually take the square root for the sake of comparison
(color_id, distance)
});
Color::Fixed(distances.min_by(|(_, d1), (_, d2)| d1.cmp(d2)).unwrap().0)
}
fn parse_color(json: serde_json::Value) -> Option<Color> {
use serde_json::Value;
match json {
Value::Number(n) => n.as_u64().map(|c| Color::Fixed(c as u8)),
Value::String(s) => match s.to_lowercase().as_str() {
"black" => Some(Color::Black),
"blue" => Some(Color::Blue),
"cyan" => Some(Color::Cyan),
"green" => Some(Color::Green),
"purple" => Some(Color::Purple),
"red" => Some(Color::Red),
"white" => Some(Color::White),
"yellow" => Some(Color::Yellow),
s => {
if let Some((red, green, blue)) = hex_string_to_rgb(s) {
Some(Color::RGB(red, green, blue))
} else {
None
}
}
},
_ => None,
}
}
fn hex_string_to_rgb(s: &str) -> Option<(u8, u8, u8)> {
if s.starts_with('#') && s.len() >= 7 {
if let (Ok(red), Ok(green), Ok(blue)) = (
u8::from_str_radix(&s[1..3], 16),
u8::from_str_radix(&s[3..5], 16),
u8::from_str_radix(&s[5..7], 16),
) {
Some((red, green, blue))
} else {
None
}
} else {
None
}
}
fn style_to_css(style: ansi_term::Style) -> String {
let mut result = "style='".to_string();
if style.is_underline {
write!(&mut result, "text-decoration: underline;").unwrap();
}
if style.is_bold {
write!(&mut result, "font-weight: bold;").unwrap();
}
if style.is_italic {
write!(&mut result, "font-style: italic;").unwrap();
}
if let Some(color) = style.foreground {
write_color(&mut result, color);
}
result.push('\'');
result
}
fn write_color(buffer: &mut String, color: Color) {
if let Color::RGB(r, g, b) = &color {
write!(buffer, "color: #{:x?}{:x?}{:x?}", r, g, b).unwrap()
} else {
write!(
buffer,
"color: {}",
match color {
Color::Black => "black",
Color::Blue => "blue",
Color::Red => "red",
Color::Green => "green",
Color::Yellow => "yellow",
Color::Cyan => "cyan",
Color::Purple => "purple",
Color::White => "white",
Color::Fixed(n) => CSS_STYLES_BY_COLOR_ID[n as usize].as_str(),
Color::RGB(_, _, _) => unreachable!(),
}
)
.unwrap()
}
}
#[derive(Completer, Hinter, Validator)]
pub(crate) struct Editor {
hi: RefCell<Highlighter>,
hi_cfg: HighlightConfiguration,
hi_theme: Theme,
cmd_query: Query,
}
impl Editor {
pub fn new() -> Self {
let lang = tree_sitter_rshcmd::language();
let mut hi_cfg =
HighlightConfiguration::new(lang, tree_sitter_rshcmd::HIGHLIGHTS_QUERY, "", "")
.expect("Could not init tree sitter");
let hi_theme: Theme = Default::default();
hi_cfg.configure(&hi_theme.highlight_names);
Editor {
hi: RefCell::new(Highlighter::new()),
hi_cfg,
hi_theme,
cmd_query: Query::new(lang, r"(cmd_name (identifier) @cmd)")
.expect("error building query"),
}
}
}
struct Styling {
current: Vec<StylingChoice>,
}
struct StylingChoice {
range: Range<usize>,
style: ansi_term::Style,
prio: usize,
}
impl Styling {
fn new(_len: usize) -> Self {
Styling {
// current: vec![(0..len, (ansi_term::Style::new(), 0))],
current: Vec::new(),
}
}
fn insert(&mut self, style: ansi_term::Style, range: Range<usize>, prio: usize) {
self.current.push(StylingChoice { range, style, prio });
}
fn resolve_ranges(&self, len: usize) -> Vec<(Range<usize>, &ansi_term::Style)> {
struct StyleList<'a> {
backing: Vec<(usize, &'a ansi_term::Style, usize)>,
}
impl<'a> StyleList<'a> {
fn new<I>(i: I) -> Self
where
I: IntoIterator<Item = (usize, &'a ansi_term::Style, usize)>,
{
let mut backing: Vec<_> = i.into_iter().collect();
backing.sort_by(|a, b| b.2.cmp(&a.2));
Self { backing }
}
fn remove(&mut self, idx: usize) {
let i = self
.backing
.iter()
.enumerate()
.find(|(_, s)| s.0 == idx)
.unwrap()
.0;
self.backing.remove(i);
}
fn insert(&mut self, idx: usize, style: &'a ansi_term::Style, prio: usize) {
self.backing.push((idx, style, prio));
self.backing.sort_by(|a, b| b.2.cmp(&a.2));
}
fn current(&self) -> &'a ansi_term::Style {
self.backing[0].1
}
}
if len > 0 {
let mut start = HashMap::new();
let mut end = HashMap::new();
for (i, r) in self.current.iter().enumerate() {
start
.entry(r.range.start)
.or_insert_with(Vec::new)
.push((i, &r.style, r.prio));
end.entry(r.range.end).or_insert_with(Vec::new).push(i);
}
let mut ranges = Vec::new();
let mut rstart = 0;
let mut styles = StyleList::new(start.get(&0).unwrap().iter().copied());
for i in 1..len {
if let Some(ends) = end.get(&i) {
ranges.push((rstart..i, styles.current()));
for idx in ends {
styles.remove(*idx);
}
rstart = i;
}
if let Some(starts) = start.get(&i) {
for (idx, style, prio) in starts {
styles.insert(*idx, style, *prio);
}
}
}
ranges.push((rstart..len, styles.current()));
ranges
} else {
Vec::new()
}
}
fn paint(&self, source: &str) -> String {
let mut s = Vec::new();
for (range, style) in self.resolve_ranges(source.len()) {
style
.paint(&source.as_bytes()[range])
.write_to(&mut s)
.expect("can fail write in string?");
}
String::from_utf8(s).expect("we got UTF-8 in, hi is UTF8")
}
}
impl HiTrait for Editor {
fn highlight<'l>(&self, line: &'l str, _pos: usize) -> std::borrow::Cow<'l, str> {
let mut hi = self.hi.borrow_mut();
let events = hi
.highlight(&self.hi_cfg, line.as_bytes(), None, |_| None)
.expect("hi failed");
let mut stylings = Styling::new(line.len());
let mut style_stack = vec![self.hi_theme.default_style().ansi];
for event in events {
match event.expect("hi failure") {
HighlightEvent::HighlightStart(kind) => {
style_stack.push(self.hi_theme.styles[kind.0].ansi);
}
HighlightEvent::HighlightEnd => {
style_stack.pop();
}
HighlightEvent::Source { start, end } => {
let style = style_stack.last().unwrap();
stylings.insert(*style, start..end, 1);
}
}
}
let parsed = hi.parser().parse(line, None);
if let Some(parsed) = parsed {
for query_match in
QueryCursor::new().matches(&self.cmd_query, parsed.root_node(), line.as_bytes())
{
for capture in query_match.captures {
let start = capture.node.start_byte();
let end = capture.node.end_byte();
let is_exec = which(&line[start..end]).is_ok();
if is_exec {
stylings.insert(ansi_term::Style::new().fg(Color::Green), start..end, 2);
} else {
stylings.insert(ansi_term::Style::new().fg(Color::Red), start..end, 2);
}
}
}
|
Style
|
identifier_name
|
editor.rs
|
rustyline_derive::{Completer, Hinter, Validator};
use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer};
use serde_json::json;
use std::{cell::RefCell, collections::HashMap, fmt::Write, ops::Range};
use tree_sitter::{Query, QueryCursor};
use tree_sitter_highlight::{HighlightConfiguration, HighlightEvent, Highlighter};
use which::which;
lazy_static! {
static ref CSS_STYLES_BY_COLOR_ID: Vec<String> =
serde_json::from_str(include_str!("../vendor/xterm-colors.json")).unwrap();
}
#[derive(Debug, Default)]
pub struct Style {
pub ansi: ansi_term::Style,
pub css: Option<String>,
}
#[derive(Debug)]
pub struct Theme {
pub styles: Vec<Style>,
pub highlight_names: Vec<String>,
}
#[derive(Default, Deserialize, Serialize)]
pub struct ThemeConfig {
#[serde(default)]
pub theme: Theme,
}
impl Theme {
/* pub fn load(path: &Path) -> std::io::Result<Self> {
let json = std::fs::read_to_string(path)?;
Ok(serde_json::from_str(&json).unwrap_or_default())
} */
pub fn default_style(&self) -> Style {
Style::default()
}
}
impl<'de> Deserialize<'de> for Theme {
fn deserialize<D>(deserializer: D) -> std::result::Result<Theme, D::Error>
where
D: Deserializer<'de>,
{
let mut styles = Vec::new();
let mut highlight_names = Vec::new();
if let Ok(colors) = HashMap::<String, serde_json::Value>::deserialize(deserializer) {
highlight_names.reserve(colors.len());
styles.reserve(colors.len());
for (name, style_value) in colors {
let mut style = Style::default();
parse_style(&mut style, style_value);
highlight_names.push(name);
styles.push(style);
}
}
Ok(Self {
styles,
highlight_names,
})
}
}
impl Serialize for Theme {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut map = serializer.serialize_map(Some(self.styles.len()))?;
for (name, style) in self.highlight_names.iter().zip(&self.styles) {
let style = &style.ansi;
let color = style.foreground.map(|color| match color {
Color::Black => json!("black"),
Color::Blue => json!("blue"),
Color::Cyan => json!("cyan"),
Color::Green => json!("green"),
Color::Purple => json!("purple"),
Color::Red => json!("red"),
Color::White => json!("white"),
Color::Yellow => json!("yellow"),
Color::RGB(r, g, b) => json!(format!("#{:x?}{:x?}{:x?}", r, g, b)),
Color::Fixed(n) => json!(n),
});
if style.is_bold || style.is_italic || style.is_underline {
let mut style_json = HashMap::new();
if let Some(color) = color {
style_json.insert("color", color);
}
if style.is_bold {
style_json.insert("bold", serde_json::Value::Bool(true));
}
if style.is_italic {
style_json.insert("italic", serde_json::Value::Bool(true));
}
if style.is_underline {
style_json.insert("underline", serde_json::Value::Bool(true));
}
map.serialize_entry(&name, &style_json)?;
} else if let Some(color) = color {
map.serialize_entry(&name, &color)?;
} else {
map.serialize_entry(&name, &serde_json::Value::Null)?;
}
}
map.end()
}
}
impl Default for Theme {
fn default() -> Self {
serde_json::from_str(
r#"
{
"attribute": {"color": 124, "italic": true},
"comment": {"color": 245, "italic": true},
"constant.builtin": {"color": 94, "bold": true},
"constant": 94,
"constructor": 136,
"embedded": null,
"function.builtin": {"color": 26, "bold": true},
"function": 26,
"keyword": 56,
"number": {"color": 94, "bold": true},
"property": 124,
"operator": {"color": 239, "bold": true},
"punctuation.bracket": 239,
"punctuation.delimiter": 239,
"string.special": 30,
"string": 28,
"tag": 18,
"type": 23,
"type.builtin": {"color": 23, "bold": true},
"variable.builtin": {"bold": true},
"variable.parameter": {"underline": true}
}
"#,
)
.unwrap()
}
}
fn parse_style(style: &mut Style, json: serde_json::Value) {
use serde_json::Value;
if let Value::Object(entries) = json {
for (property_name, value) in entries {
match property_name.as_str() {
"bold" => {
if value == Value::Bool(true) {
style.ansi = style.ansi.bold()
}
}
"italic" => {
if value == Value::Bool(true) {
style.ansi = style.ansi.italic()
}
}
"underline" => {
if value == Value::Bool(true) {
style.ansi = style.ansi.underline()
}
}
"color" => {
if let Some(color) = parse_color(value) {
style.ansi = style.ansi.fg(color);
}
}
_ => {}
}
}
style.css = Some(style_to_css(style.ansi));
} else if let Some(color) = parse_color(json) {
style.ansi = style.ansi.fg(color);
style.css = Some(style_to_css(style.ansi));
} else {
style.css = None;
}
if let Some(Color::RGB(red, green, blue)) = style.ansi.foreground {
if!terminal_supports_truecolor() {
style.ansi = style.ansi.fg(closest_xterm_color(red, green, blue));
}
}
}
fn terminal_supports_truecolor() -> bool {
use std::env;
if let Ok(truecolor) = env::var("COLORTERM") {
truecolor == "truecolor" || truecolor == "24bit"
} else {
false
}
}
fn closest_xterm_color(red: u8, green: u8, blue: u8) -> Color {
use std::cmp::{max, min};
let colors = CSS_STYLES_BY_COLOR_ID
.iter()
.enumerate()
.map(|(color_id, hex)| (color_id as u8, hex_string_to_rgb(hex).unwrap()));
// Get the xterm color with the minimum Euclidean distance to the target color
// i.e. distance = √ (r2 - r1)² + (g2 - g1)² + (b2 - b1)²
let distances = colors.map(|(color_id, (r, g, b))| {
let r_delta: u32 = (max(r, red) - min(r, red)).into();
let g_delta: u32 = (max(g, green) - min(g, green)).into();
let b_delta: u32 = (max(b, blue) - min(b, blue)).into();
let distance = r_delta.pow(2) + g_delta.pow(2) + b_delta.pow(2);
// don't need to actually take the square root for the sake of comparison
(color_id, distance)
});
Color::Fixed(distances.min_by(|(_, d1), (_, d2)| d1.cmp(d2)).unwrap().0)
}
fn parse_color(json: serde_json::Value) -> Option<Color> {
use serde_json::Value;
match json {
Value::Number(n) => n.as_u64().map(|c| Color::Fixed(c as u8)),
Value::String(s) => match s.to_lowercase().as_str() {
"black" => Some(Color::Black),
"blue" => Some(Color::Blue),
"cyan" => Some(Color::Cyan),
"green" => Some(Color::Green),
"purple" => Some(Color::Purple),
"red" => Some(Color::Red),
"white" => Some(Color::White),
"yellow" => Some(Color::Yellow),
s => {
if let Some((red, green, blue)) = hex_string_to_rgb(s) {
Some(Color::RGB(red, green, blue))
} else {
None
}
}
},
_ => None,
}
}
fn hex_string_to_rgb(s: &str) -> Option<(u8, u8, u8)> {
if s.starts_with('#') && s.len() >= 7 {
if let (Ok(red), Ok(green), Ok(blue)) = (
u8::from_str_radix(&s[1..3], 16),
u8::from_str_radix(&s[3..5], 16),
u8::from_str_radix(&s[5..7], 16),
) {
Some((red, green, blue))
} else {
None
}
} else {
None
}
}
fn style_to_css(style: ansi_term::Style) -> String {
let mut result = "style='".to_string();
if style.is_underline {
write!(&mut result, "text-decoration: underline;").unwrap();
}
if style.is_bold {
write!(&mut result, "font-weight: bold;").unwrap();
}
if style.is_italic {
write!(&mut result, "font-style: italic;").unwrap();
}
if let Some(color) = style.foreground {
write_color(&mut result, color);
}
result.push('\'');
result
}
fn write_color(buffer: &mut String, color: Color) {
if let Color::RGB(r, g, b) = &color {
write!(buffer, "color: #{:x?}{:x?}{:x?}", r, g, b).unwrap()
} else {
write!(
buffer,
"color: {}",
match color {
Color::Black => "black",
Color::Blue => "blue",
Color::Red => "red",
Color::Green => "green",
Color::Yellow => "yellow",
Color::Cyan => "cyan",
Color::Purple => "purple",
Color::White => "white",
Color::Fixed(n) => CSS_STYLES_BY_COLOR_ID[n as usize].as_str(),
Color::RGB(_, _, _) => unreachable!(),
}
)
.unwrap()
}
}
#[derive(Completer, Hinter, Validator)]
pub(crate) struct Editor {
hi: RefCell<Highlighter>,
hi_cfg: HighlightConfiguration,
hi_theme: Theme,
cmd_query: Query,
}
impl Editor {
pub fn new() -> Self {
let lang = tree_sitter_rshcmd::language();
let mut hi_cfg =
HighlightConfiguration::new(lang, tree_sitter_rshcmd::HIGHLIGHTS_QUERY, "", "")
.expect("Could not init tree sitter");
let hi_theme: Theme = Default::default();
hi_cfg.configure(&hi_theme.highlight_names);
Editor {
hi: RefCell::new(Highlighter::new()),
hi_cfg,
hi_theme,
cmd_query: Query::new(lang, r"(cmd_name (identifier) @cmd)")
.expect("error building query"),
}
}
}
struct Styling {
current: Vec<StylingChoice>,
}
struct StylingChoice {
range: Range<usize>,
style: ansi_term::Style,
prio: usize,
}
impl Styling {
fn new(_len: usize) -> Self {
Styling {
// current: vec![(0..len, (ansi_term::Style::new(), 0))],
current: Vec::new(),
}
}
fn insert(&mut self, style: ansi_term::Style, range: Range<usize>, prio: usize) {
self.current.push(StylingChoice { range, style, prio });
}
fn resolve_ranges(&self, len: usize) -> Vec<(Range<usize>, &ansi_term::Style)> {
struct StyleList<'a> {
backing: Vec<(usize, &'a ansi_term::Style, usize)>,
}
impl<'a> StyleList<'a> {
fn new<I>(i: I) -> Self
where
I: IntoIterator<Item = (usize, &'a ansi_term::Style, usize)>,
{
let mut backing: Vec<_> = i.into_iter().collect();
backing.sort_by(|a, b| b.2.cmp(&a.2));
Self { backing }
}
fn remove(&mut self, idx: usize) {
let i = self
.backing
.iter()
.enumerate()
.find(|(_, s)| s.0 == idx)
.unwrap()
.0;
self.backing.remove(i);
}
fn insert(&mut self, idx: usize, style: &'a ansi_term::Style, prio: usize) {
self.backing.push((idx, style, prio));
self.backing.sort_by(|a, b| b.2.cmp(&a.2));
}
fn current(&self) -> &'a ansi_term::Style {
|
}
if len > 0 {
let mut start = HashMap::new();
let mut end = HashMap::new();
for (i, r) in self.current.iter().enumerate() {
start
.entry(r.range.start)
.or_insert_with(Vec::new)
.push((i, &r.style, r.prio));
end.entry(r.range.end).or_insert_with(Vec::new).push(i);
}
let mut ranges = Vec::new();
let mut rstart = 0;
let mut styles = StyleList::new(start.get(&0).unwrap().iter().copied());
for i in 1..len {
if let Some(ends) = end.get(&i) {
ranges.push((rstart..i, styles.current()));
for idx in ends {
styles.remove(*idx);
}
rstart = i;
}
if let Some(starts) = start.get(&i) {
for (idx, style, prio) in starts {
styles.insert(*idx, style, *prio);
}
}
}
ranges.push((rstart..len, styles.current()));
ranges
} else {
Vec::new()
}
}
fn paint(&self, source: &str) -> String {
let mut s = Vec::new();
for (range, style) in self.resolve_ranges(source.len()) {
style
.paint(&source.as_bytes()[range])
.write_to(&mut s)
.expect("can fail write in string?");
}
String::from_utf8(s).expect("we got UTF-8 in, hi is UTF8")
}
}
impl HiTrait for Editor {
fn highlight<'l>(&self, line: &'l str, _pos: usize) -> std::borrow::Cow<'l, str> {
let mut hi = self.hi.borrow_mut();
let events = hi
.highlight(&self.hi_cfg, line.as_bytes(), None, |_| None)
.expect("hi failed");
let mut stylings = Styling::new(line.len());
let mut style_stack = vec![self.hi_theme.default_style().ansi];
for event in events {
match event.expect("hi failure") {
HighlightEvent::HighlightStart(kind) => {
style_stack.push(self.hi_theme.styles[kind.0].ansi);
}
HighlightEvent::HighlightEnd => {
style_stack.pop();
}
HighlightEvent::Source { start, end } => {
let style = style_stack.last().unwrap();
stylings.insert(*style, start..end, 1);
}
}
}
let parsed = hi.parser().parse(line, None);
if let Some(parsed) = parsed {
for query_match in
QueryCursor::new().matches(&self.cmd_query, parsed.root_node(), line.as_bytes())
{
for capture in query_match.captures {
let start = capture.node.start_byte();
let end = capture.node.end_byte();
let is_exec = which(&line[start..end]).is_ok();
if is_exec {
stylings.insert(ansi_term::Style::new().fg(Color::Green), start..end, 2);
} else {
stylings.insert(ansi_term::Style::new().fg(Color::Red), start..end, 2);
}
}
}
|
self.backing[0].1
}
|
identifier_body
|
lib.rs
|
//! # docker-bisect
//! `docker-bisect` create assumes that the docker daemon is running and that you have a
//! docker image with cached layers to probe.
extern crate colored;
extern crate dockworker;
extern crate indicatif;
extern crate rand;
use std::clone::Clone;
use std::fmt;
use std::io::{prelude::*, Error, ErrorKind};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, SystemTime};
use colored::*;
use dockworker::*;
use indicatif::ProgressBar;
use rand::Rng;
/// Truncates a string to a single line with a max width
/// and removes docker prefixes.
///
/// # Example
/// ```
/// use docker_bisect::truncate;
/// let line = "blar #(nop) real command\n line 2";
/// assert_eq!("real com", truncate(&line, 8));
/// ```
pub fn truncate(mut s: &str, max_chars: usize) -> &str {
s = s.lines().next().expect("nothing to truncate");
if s.contains("#(nop) ") {
let mut splat = s.split(" #(nop) ");
let _ = splat.next();
s = splat.next().expect("#(nop) with no command in.");
s = s.trim();
}
match s.char_indices().nth(max_chars) {
None => s,
Some((idx, _)) => &s[..idx],
}
}
/// A layer in a docker image. (A layer is a set of files changed due to the previous command).
#[derive(Debug, Clone, Eq, Ord, PartialOrd, PartialEq)]
pub struct Layer {
pub height: usize,
pub image_name: String,
pub creation_command: String,
}
impl fmt::Display for Layer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} | {:?}", self.image_name, self.creation_command)
}
}
/// The stderr/stdout of running the command on a container made of this layer
/// (on top of all earlier layers). If command hit the timeout the result may be truncated or empty.
#[derive(Debug, Clone, Eq, Ord, PartialOrd, PartialEq)]
pub struct LayerResult {
pub layer: Layer,
pub result: String,
}
impl fmt::Display for LayerResult {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} | {}", self.layer, self.result)
}
}
/// A Transition is the LayerResult of running the command on the lower layer
/// and of running the command on the higher layer. No-op transitions are not recorded.
#[derive(Debug, Eq, Ord, PartialOrd, PartialEq)]
pub struct Transition {
pub before: Option<LayerResult>,
pub after: LayerResult,
}
impl fmt::Display for Transition {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self.before {
Some(be) => write!(f, "({} -> {})", be, self.after),
None => write!(f, "-> {}", self.after),
}
}
}
/// Starts the bisect operation. Calculates highest and lowest layer result and if they have
/// different outputs it starts a binary chop to figure out which layer(s) caused the change.
fn
|
<T>(layers: Vec<Layer>, action: &T) -> Result<Vec<Transition>, Error>
where
T: ContainerAction +'static,
{
let first_layer = layers.first().expect("no first layer");
let last_layer = layers.last().expect("no last layer");
let first_image_name: String = first_layer.image_name.clone();
let last_image_name = &last_layer.image_name;
let action_c = action.clone();
let left_handle = thread::spawn(move || action_c.try_container(&first_image_name));
let end = action.try_container(last_image_name);
let start = left_handle.join().expect("first layer execution error!");
if start == end {
return Ok(vec![Transition {
before: None,
after: LayerResult {
layer: last_layer.clone(),
result: start,
},
}]);
}
bisect(
Vec::from(&layers[1..layers.len() - 1]),
LayerResult {
layer: first_layer.clone(),
result: start,
},
LayerResult {
layer: last_layer.clone(),
result: end,
},
action,
)
}
fn bisect<T>(
history: Vec<Layer>,
start: LayerResult,
end: LayerResult,
action: &T,
) -> Result<Vec<Transition>, Error>
where
T: ContainerAction +'static,
{
let size = history.len();
if size == 0 {
if start.result == end.result {
return Err(Error::new(std::io::ErrorKind::Other, ""));
}
return Ok(vec![Transition {
before: Some(start.clone()),
after: end.clone(),
}]);
}
let half = size / 2;
let mid_result = LayerResult {
layer: history[half].clone(),
result: action.try_container(&history[half].image_name),
};
if size == 1 {
let mut results = Vec::<Transition>::new();
if *start.result!= mid_result.result {
results.push(Transition {
before: Some(start.clone()),
after: mid_result.clone(),
});
}
if mid_result.result!= *end.result {
results.push(Transition {
before: Some(mid_result),
after: end.clone(),
});
}
return Ok(results);
}
if start.result == mid_result.result {
action.skip((mid_result.layer.height - start.layer.height) as u64);
return bisect(Vec::from(&history[half + 1..]), mid_result, end, action);
}
if mid_result.result == end.result {
action.skip((end.layer.height - mid_result.layer.height) as u64);
return bisect(Vec::from(&history[..half]), start, mid_result, action);
}
let clone_a = action.clone();
let clone_b = action.clone();
let mid_result_c = mid_result.clone();
let hist_a = Vec::from(&history[..half]);
let left_handle = thread::spawn(move || bisect(hist_a, start, mid_result, &clone_a));
let right_handle =
thread::spawn(move || bisect(Vec::from(&history[half + 1..]), mid_result_c, end, &clone_b));
let mut left_results: Vec<Transition> = left_handle
.join()
.expect("left")
.expect("left transition err");
let right_results: Vec<Transition> = right_handle
.join()
.expect("right")
.expect("right transition err");
left_results.extend(right_results); // These results are sorted later...
Ok(left_results)
}
trait ContainerAction: Clone + Send {
fn try_container(&self, container_id: &str) -> String;
fn skip(&self, count: u64) -> ();
}
#[derive(Clone)]
struct DockerContainer {
pb: Arc<ProgressBar>,
command_line: Vec<String>,
timeout_in_seconds: usize,
}
impl DockerContainer {
fn new(total: u64, command_line: Vec<String>, timeout_in_seconds: usize) -> DockerContainer {
let pb = Arc::new(ProgressBar::new(total));
DockerContainer {
pb,
command_line,
timeout_in_seconds,
}
}
}
struct Guard<'a> { buf: &'a mut Vec<u8>, len: usize }
impl<'a> Drop for Guard<'a> {
fn drop(&mut self) {
unsafe { self.buf.set_len(self.len); }
}
}
impl ContainerAction for DockerContainer {
    /// Creates a container from image `container_id`, runs the configured
    /// command in it, and returns whatever it wrote to stdout/stderr — or
    /// the daemon's error message if the container failed to start.
    fn try_container(&self, container_id: &str) -> String {
        let docker: Docker = Docker::connect_with_defaults().expect("docker daemon running?");
        // Random (float-formatted) name so parallel probes don't collide.
        let container_name: String = rand::thread_rng().gen_range(0., 1.3e4).to_string();
        //Create container
        let mut create = ContainerCreateOptions::new(&container_id);
        let mut host_config = ContainerHostConfig::new();
        host_config.auto_remove(false);
        create.host_config(host_config);
        let it = self.command_line.iter();
        for command in it {
            create.cmd(command.clone());
        }
        let container: CreateContainerResponse = docker
            .create_container(Some(&container_name), &create)
            .expect("couldn't create container");
        let result = docker.start_container(&container.id);
        if result.is_err() {
            // A start failure (e.g. layer not runnable) becomes the "output"
            // so bisection can still compare it across layers.
            let err: dockworker::errors::Error = result.unwrap_err();
            return format!("{}", err);
        }
        let log_options = ContainerLogOptions {
            stdout: true,
            stderr: true,
            since: None,
            timestamps: None,
            tail: None,
            follow: true,
        };
        let timeout = Duration::from_secs(self.timeout_in_seconds as u64);
        let mut container_output = String::new();
        let now = SystemTime::now();
        let timeout_time = now + timeout;
        let result = docker.log_container(&container_name, &log_options);
        if let Ok(result) = result {
            let mut r = result;
            let reservation_size = 32;
            let mut buf = Vec::<u8>::new();
            {
                // Manual read-to-end with a deadline; `Guard` restores the
                // Vec's true length even on early exit (see Guard's Drop).
                let mut g = Guard { len: buf.len(), buf: &mut buf };
                loop {
                    if g.len == g.buf.len() {
                        g.buf.resize(g.len + reservation_size, 0);
                    }
                    match r.read(&mut g.buf[g.len..]) {
                        Ok(0) => { break; }
                        Ok(n) => g.len += n,
                        Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
                        Err(_e) => { break; }
                    }
                    // NOTE(review): the deadline is only checked between
                    // reads, so a single blocking read can overrun the
                    // timeout — confirm this is acceptable.
                    if SystemTime::now() > timeout_time {
                        break;
                    }
                }
            }
            // Lossy conversion: invalid UTF-8 from the container becomes U+FFFD.
            container_output = String::from_utf8_lossy(&buf).to_string();
        }
        self.pb.inc(1);
        let _stop_result = docker.stop_container(&container.id, timeout);
        container_output
    }
    /// Advances the shared progress bar for layers the bisection skipped.
    fn skip(&self, count: u64) -> () {
        self.pb.inc(count);
    }
}
/// Struct to hold parameters.
pub struct BisectOptions {
pub timeout_in_seconds: usize,
pub trunc_size: usize,
}
/// Create containers based on layers and run command_line against them.
/// Result is the differences in std out and std err.
pub fn try_bisect(
    histories: &Vec<ImageLayer>,
    command_line: Vec<String>,
    options: BisectOptions,
) -> Result<Vec<Transition>, Error> {
    println!(
        "\n{}\n\n{:?}\n",
        "Command to apply to layers:".bold(),
        &command_line
    );
    let create_and_try_container = DockerContainer::new(
        histories.len() as u64,
        command_line,
        options.timeout_in_seconds,
    );
    println!("{}", "Skipped missing layers:".bold());
    println!();
    let mut layers = Vec::new();
    // Histories arrive newest-first; reverse so height 0 is the base layer.
    for (index, event) in histories.iter().rev().enumerate() {
        let mut created = event.created_by.clone();
        created = truncate(&created, options.trunc_size).to_string();
        match event.id.clone() {
            Some(layer_name) => layers.push(Layer {
                height: index,
                image_name: layer_name,
                creation_command: event.created_by.clone(),
            }),
            // NOTE(review): `created` is already truncated above, so this
            // second truncate is a no-op (truncate is idempotent).
            None => println!("{:<3}: {}.", index, truncate(&created, options.trunc_size)),
        }
    }
    println!();
    println!(
        "{}",
        "Bisecting found layers (running command on the layers) ==>\n".bold()
    );
    // Need at least two cached layers to have endpoints to compare.
    if layers.len() < 2 {
        println!();
        eprintln!(
            "{} layers found in cache - not enough layers to bisect.",
            layers.len()
        );
        return Err(Error::new(
            std::io::ErrorKind::Other,
            "no cached layers found!",
        ));
    }
    let results = get_changes(layers, &create_and_try_container);
    create_and_try_container.pb.finish_with_message("done");
    results
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashMap;
#[derive(Clone)]
struct MapAction {
map: HashMap<String, String>,
}
impl MapAction {
fn new(from: Vec<usize>, to: Vec<&str>) -> Self {
let mut object = MapAction {
map: HashMap::new(),
};
for (f, t) in from.iter().zip(to.iter()) {
object.map.insert(f.to_string(), t.to_string());
}
object
}
}
impl ContainerAction for MapAction {
fn try_container(&self, container_id: &str) -> String {
let none = String::new();
let result: &String = self.map.get(container_id).unwrap_or(&none);
result.clone()
}
fn skip(&self, _count: u64) -> () {}
}
fn lay(id: usize) -> Layer {
Layer {
height: id,
image_name: id.to_string(),
creation_command: id.to_string(),
}
}
#[test]
fn if_output_always_same_return_earliest_command() {
let results = get_changes(
vec![lay(1), lay(2), lay(3)],
&MapAction::new(vec![1, 2, 3], vec!["A", "A", "A"]),
);
assert_eq!(
results.unwrap(),
vec![Transition {
before: None,
after: LayerResult {
layer: lay(3),
result: "A".to_string()
},
}]
);
}
#[test]
fn if_one_difference_show_command_that_made_difference() {
let results = get_changes(
vec![lay(1), lay(2), lay(3)],
&MapAction::new(vec![1, 2, 3], vec!["A", "A", "B"]),
);
assert_eq!(
results.unwrap(),
vec![Transition {
before: Some(LayerResult {
layer: lay(2),
result: "A".to_string()
}),
after: LayerResult {
layer: lay(3),
result: "B".to_string()
},
}]
);
}
#[test]
fn if_two_differences_show_two_commands_that_made_difference() {
let results = get_changes(
vec![lay(1), lay(2), lay(3), lay(4)],
&MapAction::new(vec![1, 2, 3, 4], vec!["A", "B", "B", "C"]),
);
let res = results.unwrap();
assert_eq!(
res,
vec![
Transition {
before: Some(LayerResult {
layer: lay(1),
result: "A".to_string()
}),
after: LayerResult {
layer: lay(2),
result: "B".to_string()
},
},
Transition {
before: Some(LayerResult {
layer: lay(3),
result: "B".to_string()
}),
after: LayerResult {
layer: lay(4),
result: "C".to_string()
},
}
]
);
}
#[test]
fn three_transitions() {
let results = get_changes(
vec![
lay(1),
lay(2),
lay(3),
lay(4),
lay(5),
lay(6),
lay(7),
lay(8),
lay(9),
lay(10),
],
&MapAction::new(
vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
vec!["A", "B", "B", "C", "C", "C", "C", "C", "D", "D"],
),
);
let res = results.unwrap();
assert_eq!(
res,
vec![
Transition {
before: Some(LayerResult {
layer: lay(1),
result: "A".to_string()
}),
after: LayerResult {
layer: lay(2),
result: "B".to_string()
},
},
Transition {
before: Some(LayerResult {
layer: lay(3),
result: "B".to_string()
}),
after: LayerResult {
layer: lay(4),
result: "C".to_string()
},
},
Transition {
before: Some(LayerResult {
layer: lay(8),
result: "C".to_string()
}),
after: LayerResult {
layer: lay(9),
result: "D".to_string()
},
}
]
);
}
}
|
get_changes
|
identifier_name
|
lib.rs
|
//! # docker-bisect
//! `docker-bisect` create assumes that the docker daemon is running and that you have a
//! docker image with cached layers to probe.
extern crate colored;
extern crate dockworker;
extern crate indicatif;
extern crate rand;
use std::clone::Clone;
use std::fmt;
use std::io::{prelude::*, Error, ErrorKind};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, SystemTime};
use colored::*;
use dockworker::*;
use indicatif::ProgressBar;
use rand::Rng;
/// Truncates a string to a single line with a max width
/// and removes docker prefixes.
///
/// # Example
/// ```
/// use docker_bisect::truncate;
/// let line = "blar #(nop) real command\n line 2";
/// assert_eq!("real com", truncate(&line, 8));
/// ```
pub fn truncate(mut s: &str, max_chars: usize) -> &str {
    // Only the first line is ever displayed.
    s = s.lines().next().expect("nothing to truncate");
    // Docker history prefixes metadata commands with "#(nop) "; keep only
    // the text after the first marker.
    if s.contains("#(nop) ") {
        let mut pieces = s.split(" #(nop) ");
        pieces.next(); // discard everything before the marker
        s = pieces.next().expect("#(nop) with no command in.").trim();
    }
    // Cut at `max_chars` *characters* (not bytes) so UTF-8 is never split.
    match s.char_indices().nth(max_chars) {
        Some((byte_idx, _)) => &s[..byte_idx],
        None => s,
    }
}
/// A layer in a docker image. (A layer is a set of files changed due to the previous command).
#[derive(Debug, Clone, Eq, Ord, PartialOrd, PartialEq)]
pub struct Layer {
    pub height: usize,            // position from the base image (0 = oldest)
    pub image_name: String,       // docker image id for this layer
    pub creation_command: String, // command that produced the layer
}
impl fmt::Display for Layer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} | {:?}", self.image_name, self.creation_command)
}
}
/// The stderr/stdout of running the command on a container made of this layer
/// (on top of all earlier layers). If command hit the timeout the result may be truncated or empty.
#[derive(Debug, Clone, Eq, Ord, PartialOrd, PartialEq)]
pub struct LayerResult {
    pub layer: Layer,   // the layer the command ran against
    pub result: String, // captured stdout + stderr of the probe command
}
impl fmt::Display for LayerResult {
    /// Renders as `<layer> | <result>` — the layer's own Display form
    /// followed by the captured command output.
    fn fmt(&self, out: &mut fmt::Formatter) -> fmt::Result {
        write!(out, "{layer} | {result}", layer = self.layer, result = self.result)
    }
}
/// A Transition is the LayerResult of running the command on the lower layer
/// and of running the command on the higher layer. No-op transitions are not recorded.
#[derive(Debug, Eq, Ord, PartialOrd, PartialEq)]
pub struct Transition {
    pub before: Option<LayerResult>, // None when the output never changed at all
    pub after: LayerResult,          // layer exhibiting the (new) output
}
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self.before {
Some(be) => write!(f, "({} -> {})", be, self.after),
None => write!(f, "-> {}", self.after),
}
}
}
/// Starts the bisect operation. Calculates highest and lowest layer result and if they have
/// different outputs it starts a binary chop to figure out which layer(s) caused the change.
///
/// Returns a single `Transition` with `before: None` when the endpoints
/// produce identical output (or when there are fewer than two layers, so
/// there is nothing between them to chop).
///
/// # Panics
/// Panics if `layers` is empty or a probe thread panics.
fn get_changes<T>(layers: Vec<Layer>, action: &T) -> Result<Vec<Transition>, Error>
where
    T: ContainerAction + 'static,
{
    let first_layer = layers.first().expect("no first layer");
    let last_layer = layers.last().expect("no last layer");
    let first_image_name: String = first_layer.image_name.clone();
    let last_image_name = &last_layer.image_name;
    // Probe both endpoints concurrently: the first layer on a worker thread
    // while the last layer runs on this one.
    let action_c = action.clone();
    let left_handle = thread::spawn(move || action_c.try_container(&first_image_name));
    let end = action.try_container(last_image_name);
    let start = left_handle.join().expect("first layer execution error!");
    // Bail out early on identical output — and also when there is only one
    // layer: previously a non-deterministic action could return different
    // output for the same image twice, making `start != end` and sending a
    // single-layer vec into `&layers[1..0]`, which panics.
    if start == end || layers.len() < 2 {
        return Ok(vec![Transition {
            before: None,
            after: LayerResult {
                layer: last_layer.clone(),
                result: start,
            },
        }]);
    }
    // Binary-chop over the interior layers (endpoints are already probed).
    bisect(
        Vec::from(&layers[1..layers.len() - 1]),
        LayerResult {
            layer: first_layer.clone(),
            result: start,
        },
        LayerResult {
            layer: last_layer.clone(),
            result: end,
        },
        action,
    )
}
/// Recursive binary search over `history` — the layers strictly between two
/// already-probed endpoints `start` and `end` — returning every `Transition`
/// where the probe output changes.
fn bisect<T>(
    history: Vec<Layer>,
    start: LayerResult,
    end: LayerResult,
    action: &T,
) -> Result<Vec<Transition>, Error>
where
    T: ContainerAction +'static,
{
    let size = history.len();
    if size == 0 {
        // No interior layers: the change (if any) is start -> end itself.
        if start.result == end.result {
            // NOTE(review): only reachable if endpoints match, which every
            // caller rules out before recursing.
            return Err(Error::new(std::io::ErrorKind::Other, ""));
        }
        return Ok(vec![Transition {
            before: Some(start.clone()),
            after: end.clone(),
        }]);
    }
    let half = size / 2;
    // Probe the middle layer of the range.
    let mid_result = LayerResult {
        layer: history[half].clone(),
        result: action.try_container(&history[half].image_name),
    };
    if size == 1 {
        // Single interior layer: report a transition on either side of it.
        let mut results = Vec::<Transition>::new();
        if *start.result!= mid_result.result {
            results.push(Transition {
                before: Some(start.clone()),
                after: mid_result.clone(),
            });
        }
        if mid_result.result!= *end.result {
            results.push(Transition {
                before: Some(mid_result),
                after: end.clone(),
            });
        }
        return Ok(results);
    }
    if start.result == mid_result.result {
        // Lower half unchanged: record it as skipped, recurse into upper half.
        action.skip((mid_result.layer.height - start.layer.height) as u64);
        return bisect(Vec::from(&history[half + 1..]), mid_result, end, action);
    }
    if mid_result.result == end.result {
        // Upper half unchanged: record it as skipped, recurse into lower half.
        action.skip((end.layer.height - mid_result.layer.height) as u64);
        return bisect(Vec::from(&history[..half]), start, mid_result, action);
    }
    // Output differs on both sides of the midpoint: search both halves on
    // parallel worker threads and concatenate their transitions.
    let clone_a = action.clone();
    let clone_b = action.clone();
    let mid_result_c = mid_result.clone();
    let hist_a = Vec::from(&history[..half]);
    let left_handle = thread::spawn(move || bisect(hist_a, start, mid_result, &clone_a));
    let right_handle =
        thread::spawn(move || bisect(Vec::from(&history[half + 1..]), mid_result_c, end, &clone_b));
    let mut left_results: Vec<Transition> = left_handle
        .join()
        .expect("left")
        .expect("left transition err");
    let right_results: Vec<Transition> = right_handle
        .join()
        .expect("right")
        .expect("right transition err");
    left_results.extend(right_results); // These results are sorted later...
    Ok(left_results)
}
/// Abstraction over "run the probe command against one image layer";
/// implemented by `DockerContainer` for real runs and mocked in tests.
trait ContainerAction: Clone + Send {
    /// Runs the command against `container_id` and returns the captured output.
    fn try_container(&self, container_id: &str) -> String;
    /// Records `count` layers as skipped (for progress reporting).
    fn skip(&self, count: u64) -> ();
}
/// `ContainerAction` backed by a real Docker daemon; one shared progress
/// bar ticks across the worker threads spawned during bisection.
#[derive(Clone)]
struct DockerContainer {
    pb: Arc<ProgressBar>,      // shared bar: one tick per probed or skipped layer
    command_line: Vec<String>, // command to run inside each container
    timeout_in_seconds: usize, // limit for collecting a container's logs
}
impl DockerContainer {
    /// `total` is the number of layers — it sizes the progress bar.
    fn new(total: u64, command_line: Vec<String>, timeout_in_seconds: usize) -> DockerContainer {
        let pb = Arc::new(ProgressBar::new(total));
        DockerContainer {
            pb,
            command_line,
            timeout_in_seconds,
        }
    }
}
// RAII guard used while reading into `buf` with its length temporarily
// extended: `len` tracks how many bytes have actually been written, and
// Drop shrinks the Vec back so no caller ever observes the zero-padded
// tail (same pattern as std's `read_to_end` internals).
struct Guard<'a> { buf: &'a mut Vec<u8>, len: usize }
impl<'a> Drop for Guard<'a> {
    fn drop(&mut self) {
        // SAFETY: `len` only grows by byte counts returned from `read` into
        // `buf`, and `buf` is resized to at least `len` before each read,
        // so every byte below `len` is initialized and within capacity.
        unsafe { self.buf.set_len(self.len); }
    }
}
impl ContainerAction for DockerContainer {
    /// Creates a container from image `container_id`, runs the configured
    /// command in it, and returns whatever it wrote to stdout/stderr — or
    /// the daemon's error message if the container failed to start.
    fn try_container(&self, container_id: &str) -> String {
        let docker: Docker = Docker::connect_with_defaults().expect("docker daemon running?");
        // Random (float-formatted) name so parallel probes don't collide.
        let container_name: String = rand::thread_rng().gen_range(0., 1.3e4).to_string();
        //Create container
        let mut create = ContainerCreateOptions::new(&container_id);
        let mut host_config = ContainerHostConfig::new();
        host_config.auto_remove(false);
        create.host_config(host_config);
        let it = self.command_line.iter();
        for command in it {
            create.cmd(command.clone());
        }
        let container: CreateContainerResponse = docker
            .create_container(Some(&container_name), &create)
            .expect("couldn't create container");
        let result = docker.start_container(&container.id);
        if result.is_err() {
            // A start failure (e.g. layer not runnable) becomes the "output"
            // so bisection can still compare it across layers.
            let err: dockworker::errors::Error = result.unwrap_err();
            return format!("{}", err);
        }
        let log_options = ContainerLogOptions {
            stdout: true,
            stderr: true,
            since: None,
            timestamps: None,
            tail: None,
            follow: true,
        };
        let timeout = Duration::from_secs(self.timeout_in_seconds as u64);
        let mut container_output = String::new();
        let now = SystemTime::now();
        let timeout_time = now + timeout;
        let result = docker.log_container(&container_name, &log_options);
        if let Ok(result) = result {
            let mut r = result;
            let reservation_size = 32;
            let mut buf = Vec::<u8>::new();
            {
                // Manual read-to-end with a deadline; `Guard` restores the
                // Vec's true length even on early exit (see Guard's Drop).
                let mut g = Guard { len: buf.len(), buf: &mut buf };
                loop {
                    if g.len == g.buf.len() {
                        g.buf.resize(g.len + reservation_size, 0);
                    }
                    match r.read(&mut g.buf[g.len..]) {
                        Ok(0) => { break; }
                        Ok(n) => g.len += n,
                        Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
                        Err(_e) => { break; }
                    }
                    // NOTE(review): the deadline is only checked between
                    // reads, so a single blocking read can overrun the
                    // timeout — confirm this is acceptable.
                    if SystemTime::now() > timeout_time {
                        break;
                    }
                }
            }
            // Lossy conversion: invalid UTF-8 from the container becomes U+FFFD.
            container_output = String::from_utf8_lossy(&buf).to_string();
        }
        self.pb.inc(1);
        let _stop_result = docker.stop_container(&container.id, timeout);
        container_output
    }
    /// Advances the shared progress bar for layers the bisection skipped.
    fn skip(&self, count: u64) -> () {
        self.pb.inc(count);
    }
}
/// Struct to hold parameters.
pub struct BisectOptions {
    pub timeout_in_seconds: usize, // per-container limit while collecting logs
    pub trunc_size: usize,         // max characters when printing layer commands
}
/// Create containers based on layers and run command_line against them.
/// Result is the differences in std out and std err.
///
/// Prints progress as it goes; returns an `Err` when fewer than two cached
/// layers are available (nothing to bisect).
pub fn try_bisect(
    histories: &Vec<ImageLayer>,
    command_line: Vec<String>,
    options: BisectOptions,
) -> Result<Vec<Transition>, Error> {
    println!(
        "\n{}\n\n{:?}\n",
        "Command to apply to layers:".bold(),
        &command_line
    );
    let create_and_try_container = DockerContainer::new(
        histories.len() as u64,
        command_line,
        options.timeout_in_seconds,
    );
    println!("{}", "Skipped missing layers:".bold());
    println!();
    let mut layers = Vec::new();
    // Histories arrive newest-first; reverse so height 0 is the base layer.
    for (index, event) in histories.iter().rev().enumerate() {
        match event.id.clone() {
            Some(layer_name) => layers.push(Layer {
                height: index,
                image_name: layer_name,
                creation_command: event.created_by.clone(),
            }),
            // Layer missing from the local cache: just report it. (The old
            // code truncated the command twice; truncate is idempotent, so
            // a single call produces identical output.)
            None => println!(
                "{:<3}: {}.",
                index,
                truncate(&event.created_by, options.trunc_size)
            ),
        }
    }
    println!();
    println!(
        "{}",
        "Bisecting found layers (running command on the layers) ==>\n".bold()
    );
    // Need at least two cached layers to have endpoints to compare.
    if layers.len() < 2 {
        println!();
        eprintln!(
            "{} layers found in cache - not enough layers to bisect.",
            layers.len()
        );
        return Err(Error::new(
            std::io::ErrorKind::Other,
            "no cached layers found!",
        ));
    }
    let results = get_changes(layers, &create_and_try_container);
    create_and_try_container.pb.finish_with_message("done");
    results
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashMap;
    // Test double for `ContainerAction`: maps an image name to canned
    // output instead of talking to a docker daemon.
    #[derive(Clone)]
    struct MapAction {
        map: HashMap<String, String>,
    }
    impl MapAction {
        // Builds the fixture from parallel vectors of layer ids and outputs.
        fn new(from: Vec<usize>, to: Vec<&str>) -> Self {
            let mut object = MapAction {
                map: HashMap::new(),
            };
            for (f, t) in from.iter().zip(to.iter()) {
                object.map.insert(f.to_string(), t.to_string());
            }
            object
        }
    }
    impl ContainerAction for MapAction {
        fn try_container(&self, container_id: &str) -> String {
            // Unknown ids yield empty output rather than panicking.
            let none = String::new();
            let result: &String = self.map.get(container_id).unwrap_or(&none);
            result.clone()
        }
        fn skip(&self, _count: u64) -> () {}
    }
    // Shorthand: a layer whose image name and command are both its height.
    fn lay(id: usize) -> Layer {
        Layer {
            height: id,
            image_name: id.to_string(),
            creation_command: id.to_string(),
        }
    }
    // Identical output everywhere => single no-op transition (before: None).
    #[test]
    fn if_output_always_same_return_earliest_command() {
        let results = get_changes(
            vec![lay(1), lay(2), lay(3)],
            &MapAction::new(vec![1, 2, 3], vec!["A", "A", "A"]),
        );
        assert_eq!(
            results.unwrap(),
            vec![Transition {
                before: None,
                after: LayerResult {
                    layer: lay(3),
                    result: "A".to_string()
                },
            }]
        );
    }
    // One change => exactly the layer pair where the output flipped.
    #[test]
    fn if_one_difference_show_command_that_made_difference() {
        let results = get_changes(
            vec![lay(1), lay(2), lay(3)],
            &MapAction::new(vec![1, 2, 3], vec!["A", "A", "B"]),
        );
        assert_eq!(
            results.unwrap(),
            vec![Transition {
                before: Some(LayerResult {
                    layer: lay(2),
                    result: "A".to_string()
                }),
                after: LayerResult {
                    layer: lay(3),
                    result: "B".to_string()
                },
            }]
        );
    }
    // Two changes => both transition pairs are reported.
    #[test]
    fn if_two_differences_show_two_commands_that_made_difference() {
        let results = get_changes(
            vec![lay(1), lay(2), lay(3), lay(4)],
            &MapAction::new(vec![1, 2, 3, 4], vec!["A", "B", "B", "C"]),
        );
        let res = results.unwrap();
        assert_eq!(
            res,
            vec![
                Transition {
                    before: Some(LayerResult {
                        layer: lay(1),
                        result: "A".to_string()
                    }),
                    after: LayerResult {
                        layer: lay(2),
                        result: "B".to_string()
                    },
                },
                Transition {
                    before: Some(LayerResult {
                        layer: lay(3),
                        result: "B".to_string()
                    }),
                    after: LayerResult {
                        layer: lay(4),
                        result: "C".to_string()
                    },
                }
            ]
        );
    }
    // Larger input exercising the parallel double-sided chop.
    #[test]
    fn three_transitions() {
        let results = get_changes(
            vec![
                lay(1),
                lay(2),
                lay(3),
                lay(4),
                lay(5),
                lay(6),
                lay(7),
                lay(8),
                lay(9),
                lay(10),
            ],
            &MapAction::new(
                vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                vec!["A", "B", "B", "C", "C", "C", "C", "C", "D", "D"],
            ),
        );
        let res = results.unwrap();
        assert_eq!(
            res,
            vec![
                Transition {
                    before: Some(LayerResult {
                        layer: lay(1),
                        result: "A".to_string()
                    }),
                    after: LayerResult {
                        layer: lay(2),
                        result: "B".to_string()
                    },
                },
                Transition {
                    before: Some(LayerResult {
                        layer: lay(3),
                        result: "B".to_string()
                    }),
                    after: LayerResult {
                        layer: lay(4),
                        result: "C".to_string()
                    },
                },
                Transition {
                    before: Some(LayerResult {
                        layer: lay(8),
                        result: "C".to_string()
                    }),
                    after: LayerResult {
                        layer: lay(9),
                        result: "D".to_string()
                    },
                }
            ]
        );
    }
}
|
impl fmt::Display for Transition {
|
random_line_split
|
lib.rs
|
//! # docker-bisect
//! `docker-bisect` create assumes that the docker daemon is running and that you have a
//! docker image with cached layers to probe.
extern crate colored;
extern crate dockworker;
extern crate indicatif;
extern crate rand;
use std::clone::Clone;
use std::fmt;
use std::io::{prelude::*, Error, ErrorKind};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, SystemTime};
use colored::*;
use dockworker::*;
use indicatif::ProgressBar;
use rand::Rng;
/// Truncates a string to a single line with a max width
/// and removes docker prefixes.
///
/// # Example
/// ```
/// use docker_bisect::truncate;
/// let line = "blar #(nop) real command\n line 2";
/// assert_eq!("real com", truncate(&line, 8));
/// ```
pub fn truncate(mut s: &str, max_chars: usize) -> &str {
    // Only the first line is ever displayed.
    s = s.lines().next().expect("nothing to truncate");
    if s.contains("#(nop) ") {
        // Docker history prefixes metadata commands with "#(nop) ";
        // keep only the text after the first marker.
        let mut splat = s.split(" #(nop) ");
        let _ = splat.next();
        s = splat.next().expect("#(nop) with no command in.");
        s = s.trim();
    }
    // Cut at `max_chars` *characters* (not bytes) so UTF-8 is never split.
    match s.char_indices().nth(max_chars) {
        None => s,
        Some((idx, _)) => &s[..idx],
    }
}
/// A layer in a docker image. (A layer is a set of files changed due to the previous command).
#[derive(Debug, Clone, Eq, Ord, PartialOrd, PartialEq)]
pub struct Layer {
pub height: usize,
pub image_name: String,
pub creation_command: String,
}
impl fmt::Display for Layer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} | {:?}", self.image_name, self.creation_command)
}
}
/// The stderr/stdout of running the command on a container made of this layer
/// (on top of all earlier layers). If command hit the timeout the result may be truncated or empty.
#[derive(Debug, Clone, Eq, Ord, PartialOrd, PartialEq)]
pub struct LayerResult {
pub layer: Layer,
pub result: String,
}
impl fmt::Display for LayerResult {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} | {}", self.layer, self.result)
}
}
/// A Transition is the LayerResult of running the command on the lower layer
/// and of running the command on the higher layer. No-op transitions are not recorded.
#[derive(Debug, Eq, Ord, PartialOrd, PartialEq)]
pub struct Transition {
pub before: Option<LayerResult>,
pub after: LayerResult,
}
impl fmt::Display for Transition {
    /// `"(before -> after)"` when a predecessor exists, else `"-> after"`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if let Some(prior) = &self.before {
            write!(f, "({} -> {})", prior, self.after)
        } else {
            write!(f, "-> {}", self.after)
        }
    }
}
/// Starts the bisect operation. Calculates highest and lowest layer result and if they have
/// different outputs it starts a binary chop to figure out which layer(s) caused the change.
fn get_changes<T>(layers: Vec<Layer>, action: &T) -> Result<Vec<Transition>, Error>
where
T: ContainerAction +'static,
{
let first_layer = layers.first().expect("no first layer");
let last_layer = layers.last().expect("no last layer");
let first_image_name: String = first_layer.image_name.clone();
let last_image_name = &last_layer.image_name;
let action_c = action.clone();
let left_handle = thread::spawn(move || action_c.try_container(&first_image_name));
let end = action.try_container(last_image_name);
let start = left_handle.join().expect("first layer execution error!");
if start == end {
return Ok(vec![Transition {
before: None,
after: LayerResult {
layer: last_layer.clone(),
result: start,
},
}]);
}
bisect(
Vec::from(&layers[1..layers.len() - 1]),
LayerResult {
layer: first_layer.clone(),
result: start,
},
LayerResult {
layer: last_layer.clone(),
result: end,
},
action,
)
}
fn bisect<T>(
history: Vec<Layer>,
start: LayerResult,
end: LayerResult,
action: &T,
) -> Result<Vec<Transition>, Error>
where
T: ContainerAction +'static,
{
let size = history.len();
if size == 0 {
if start.result == end.result {
return Err(Error::new(std::io::ErrorKind::Other, ""));
}
return Ok(vec![Transition {
before: Some(start.clone()),
after: end.clone(),
}]);
}
let half = size / 2;
let mid_result = LayerResult {
layer: history[half].clone(),
result: action.try_container(&history[half].image_name),
};
if size == 1 {
let mut results = Vec::<Transition>::new();
if *start.result!= mid_result.result {
results.push(Transition {
before: Some(start.clone()),
after: mid_result.clone(),
});
}
if mid_result.result!= *end.result {
results.push(Transition {
before: Some(mid_result),
after: end.clone(),
});
}
return Ok(results);
}
if start.result == mid_result.result {
action.skip((mid_result.layer.height - start.layer.height) as u64);
return bisect(Vec::from(&history[half + 1..]), mid_result, end, action);
}
if mid_result.result == end.result {
action.skip((end.layer.height - mid_result.layer.height) as u64);
return bisect(Vec::from(&history[..half]), start, mid_result, action);
}
let clone_a = action.clone();
let clone_b = action.clone();
let mid_result_c = mid_result.clone();
let hist_a = Vec::from(&history[..half]);
let left_handle = thread::spawn(move || bisect(hist_a, start, mid_result, &clone_a));
let right_handle =
thread::spawn(move || bisect(Vec::from(&history[half + 1..]), mid_result_c, end, &clone_b));
let mut left_results: Vec<Transition> = left_handle
.join()
.expect("left")
.expect("left transition err");
let right_results: Vec<Transition> = right_handle
.join()
.expect("right")
.expect("right transition err");
left_results.extend(right_results); // These results are sorted later...
Ok(left_results)
}
trait ContainerAction: Clone + Send {
fn try_container(&self, container_id: &str) -> String;
fn skip(&self, count: u64) -> ();
}
#[derive(Clone)]
struct DockerContainer {
pb: Arc<ProgressBar>,
command_line: Vec<String>,
timeout_in_seconds: usize,
}
impl DockerContainer {
fn new(total: u64, command_line: Vec<String>, timeout_in_seconds: usize) -> DockerContainer {
let pb = Arc::new(ProgressBar::new(total));
DockerContainer {
pb,
command_line,
timeout_in_seconds,
}
}
}
struct Guard<'a> { buf: &'a mut Vec<u8>, len: usize }
impl<'a> Drop for Guard<'a> {
fn drop(&mut self) {
unsafe { self.buf.set_len(self.len); }
}
}
impl ContainerAction for DockerContainer {
fn try_container(&self, container_id: &str) -> String {
let docker: Docker = Docker::connect_with_defaults().expect("docker daemon running?");
let container_name: String = rand::thread_rng().gen_range(0., 1.3e4).to_string();
//Create container
let mut create = ContainerCreateOptions::new(&container_id);
let mut host_config = ContainerHostConfig::new();
host_config.auto_remove(false);
create.host_config(host_config);
let it = self.command_line.iter();
for command in it {
create.cmd(command.clone());
}
let container: CreateContainerResponse = docker
.create_container(Some(&container_name), &create)
.expect("couldn't create container");
let result = docker.start_container(&container.id);
if result.is_err() {
let err: dockworker::errors::Error = result.unwrap_err();
return format!("{}", err);
}
let log_options = ContainerLogOptions {
stdout: true,
stderr: true,
since: None,
timestamps: None,
tail: None,
follow: true,
};
let timeout = Duration::from_secs(self.timeout_in_seconds as u64);
let mut container_output = String::new();
let now = SystemTime::now();
let timeout_time = now + timeout;
let result = docker.log_container(&container_name, &log_options);
if let Ok(result) = result {
let mut r = result;
let reservation_size = 32;
let mut buf = Vec::<u8>::new();
{
let mut g = Guard { len: buf.len(), buf: &mut buf };
loop {
if g.len == g.buf.len()
|
match r.read(&mut g.buf[g.len..]) {
Ok(0) => { break; }
Ok(n) => g.len += n,
Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
Err(_e) => { break; }
}
if SystemTime::now() > timeout_time {
break;
}
}
}
container_output = String::from_utf8_lossy(&buf).to_string();
}
self.pb.inc(1);
let _stop_result = docker.stop_container(&container.id, timeout);
container_output
}
fn skip(&self, count: u64) -> () {
self.pb.inc(count);
}
}
/// Struct to hold parameters.
pub struct BisectOptions {
pub timeout_in_seconds: usize,
pub trunc_size: usize,
}
/// Create containers based on layers and run command_line against them.
/// Result is the differences in std out and std err.
pub fn try_bisect(
histories: &Vec<ImageLayer>,
command_line: Vec<String>,
options: BisectOptions,
) -> Result<Vec<Transition>, Error> {
println!(
"\n{}\n\n{:?}\n",
"Command to apply to layers:".bold(),
&command_line
);
let create_and_try_container = DockerContainer::new(
histories.len() as u64,
command_line,
options.timeout_in_seconds,
);
println!("{}", "Skipped missing layers:".bold());
println!();
let mut layers = Vec::new();
for (index, event) in histories.iter().rev().enumerate() {
let mut created = event.created_by.clone();
created = truncate(&created, options.trunc_size).to_string();
match event.id.clone() {
Some(layer_name) => layers.push(Layer {
height: index,
image_name: layer_name,
creation_command: event.created_by.clone(),
}),
None => println!("{:<3}: {}.", index, truncate(&created, options.trunc_size)),
}
}
println!();
println!(
"{}",
"Bisecting found layers (running command on the layers) ==>\n".bold()
);
if layers.len() < 2 {
println!();
eprintln!(
"{} layers found in cache - not enough layers to bisect.",
layers.len()
);
return Err(Error::new(
std::io::ErrorKind::Other,
"no cached layers found!",
));
}
let results = get_changes(layers, &create_and_try_container);
create_and_try_container.pb.finish_with_message("done");
results
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashMap;
#[derive(Clone)]
struct MapAction {
map: HashMap<String, String>,
}
impl MapAction {
fn new(from: Vec<usize>, to: Vec<&str>) -> Self {
let mut object = MapAction {
map: HashMap::new(),
};
for (f, t) in from.iter().zip(to.iter()) {
object.map.insert(f.to_string(), t.to_string());
}
object
}
}
impl ContainerAction for MapAction {
fn try_container(&self, container_id: &str) -> String {
let none = String::new();
let result: &String = self.map.get(container_id).unwrap_or(&none);
result.clone()
}
fn skip(&self, _count: u64) -> () {}
}
fn lay(id: usize) -> Layer {
Layer {
height: id,
image_name: id.to_string(),
creation_command: id.to_string(),
}
}
#[test]
fn if_output_always_same_return_earliest_command() {
let results = get_changes(
vec![lay(1), lay(2), lay(3)],
&MapAction::new(vec![1, 2, 3], vec!["A", "A", "A"]),
);
assert_eq!(
results.unwrap(),
vec![Transition {
before: None,
after: LayerResult {
layer: lay(3),
result: "A".to_string()
},
}]
);
}
#[test]
fn if_one_difference_show_command_that_made_difference() {
let results = get_changes(
vec![lay(1), lay(2), lay(3)],
&MapAction::new(vec![1, 2, 3], vec!["A", "A", "B"]),
);
assert_eq!(
results.unwrap(),
vec![Transition {
before: Some(LayerResult {
layer: lay(2),
result: "A".to_string()
}),
after: LayerResult {
layer: lay(3),
result: "B".to_string()
},
}]
);
}
#[test]
fn if_two_differences_show_two_commands_that_made_difference() {
let results = get_changes(
vec![lay(1), lay(2), lay(3), lay(4)],
&MapAction::new(vec![1, 2, 3, 4], vec!["A", "B", "B", "C"]),
);
let res = results.unwrap();
assert_eq!(
res,
vec![
Transition {
before: Some(LayerResult {
layer: lay(1),
result: "A".to_string()
}),
after: LayerResult {
layer: lay(2),
result: "B".to_string()
},
},
Transition {
before: Some(LayerResult {
layer: lay(3),
result: "B".to_string()
}),
after: LayerResult {
layer: lay(4),
result: "C".to_string()
},
}
]
);
}
#[test]
fn three_transitions() {
let results = get_changes(
vec![
lay(1),
lay(2),
lay(3),
lay(4),
lay(5),
lay(6),
lay(7),
lay(8),
lay(9),
lay(10),
],
&MapAction::new(
vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
vec!["A", "B", "B", "C", "C", "C", "C", "C", "D", "D"],
),
);
let res = results.unwrap();
assert_eq!(
res,
vec![
Transition {
before: Some(LayerResult {
layer: lay(1),
result: "A".to_string()
}),
after: LayerResult {
layer: lay(2),
result: "B".to_string()
},
},
Transition {
before: Some(LayerResult {
layer: lay(3),
result: "B".to_string()
}),
after: LayerResult {
layer: lay(4),
result: "C".to_string()
},
},
Transition {
before: Some(LayerResult {
layer: lay(8),
result: "C".to_string()
}),
after: LayerResult {
layer: lay(9),
result: "D".to_string()
},
}
]
);
}
}
|
{
g.buf.resize(g.len + reservation_size, 0);
}
|
conditional_block
|
lib.rs
|
//! # docker-bisect
//! `docker-bisect` create assumes that the docker daemon is running and that you have a
//! docker image with cached layers to probe.
extern crate colored;
extern crate dockworker;
extern crate indicatif;
extern crate rand;
use std::clone::Clone;
use std::fmt;
use std::io::{prelude::*, Error, ErrorKind};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, SystemTime};
use colored::*;
use dockworker::*;
use indicatif::ProgressBar;
use rand::Rng;
/// Truncates a string to a single line with a max width
/// and removes docker prefixes.
///
/// # Example
/// ```
/// use docker_bisect::truncate;
/// let line = "blar #(nop) real command\n line 2";
/// assert_eq!("real com", truncate(&line, 8));
/// ```
pub fn truncate(mut s: &str, max_chars: usize) -> &str {
s = s.lines().next().expect("nothing to truncate");
if s.contains("#(nop) ") {
let mut splat = s.split(" #(nop) ");
let _ = splat.next();
s = splat.next().expect("#(nop) with no command in.");
s = s.trim();
}
match s.char_indices().nth(max_chars) {
None => s,
Some((idx, _)) => &s[..idx],
}
}
/// A layer in a docker image. (A layer is a set of files changed due to the previous command).
#[derive(Debug, Clone, Eq, Ord, PartialOrd, PartialEq)]
pub struct Layer {
pub height: usize,
pub image_name: String,
pub creation_command: String,
}
impl fmt::Display for Layer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} | {:?}", self.image_name, self.creation_command)
}
}
/// The stderr/stdout of running the command on a container made of this layer
/// (on top of all earlier layers). If command hit the timeout the result may be truncated or empty.
#[derive(Debug, Clone, Eq, Ord, PartialOrd, PartialEq)]
pub struct LayerResult {
pub layer: Layer,
pub result: String,
}
impl fmt::Display for LayerResult {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} | {}", self.layer, self.result)
}
}
/// A Transition is the LayerResult of running the command on the lower layer
/// and of running the command on the higher layer. No-op transitions are not recorded.
#[derive(Debug, Eq, Ord, PartialOrd, PartialEq)]
pub struct Transition {
pub before: Option<LayerResult>,
pub after: LayerResult,
}
impl fmt::Display for Transition {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self.before {
Some(be) => write!(f, "({} -> {})", be, self.after),
None => write!(f, "-> {}", self.after),
}
}
}
/// Starts the bisect operation. Calculates highest and lowest layer result and if they have
/// different outputs it starts a binary chop to figure out which layer(s) caused the change.
fn get_changes<T>(layers: Vec<Layer>, action: &T) -> Result<Vec<Transition>, Error>
where
T: ContainerAction +'static,
{
let first_layer = layers.first().expect("no first layer");
let last_layer = layers.last().expect("no last layer");
let first_image_name: String = first_layer.image_name.clone();
let last_image_name = &last_layer.image_name;
let action_c = action.clone();
let left_handle = thread::spawn(move || action_c.try_container(&first_image_name));
let end = action.try_container(last_image_name);
let start = left_handle.join().expect("first layer execution error!");
if start == end {
return Ok(vec![Transition {
before: None,
after: LayerResult {
layer: last_layer.clone(),
result: start,
},
}]);
}
bisect(
Vec::from(&layers[1..layers.len() - 1]),
LayerResult {
layer: first_layer.clone(),
result: start,
},
LayerResult {
layer: last_layer.clone(),
result: end,
},
action,
)
}
fn bisect<T>(
history: Vec<Layer>,
start: LayerResult,
end: LayerResult,
action: &T,
) -> Result<Vec<Transition>, Error>
where
T: ContainerAction +'static,
{
let size = history.len();
if size == 0 {
if start.result == end.result {
return Err(Error::new(std::io::ErrorKind::Other, ""));
}
return Ok(vec![Transition {
before: Some(start.clone()),
after: end.clone(),
}]);
}
let half = size / 2;
let mid_result = LayerResult {
layer: history[half].clone(),
result: action.try_container(&history[half].image_name),
};
if size == 1 {
let mut results = Vec::<Transition>::new();
if *start.result!= mid_result.result {
results.push(Transition {
before: Some(start.clone()),
after: mid_result.clone(),
});
}
if mid_result.result!= *end.result {
results.push(Transition {
before: Some(mid_result),
after: end.clone(),
});
}
return Ok(results);
}
if start.result == mid_result.result {
action.skip((mid_result.layer.height - start.layer.height) as u64);
return bisect(Vec::from(&history[half + 1..]), mid_result, end, action);
}
if mid_result.result == end.result {
action.skip((end.layer.height - mid_result.layer.height) as u64);
return bisect(Vec::from(&history[..half]), start, mid_result, action);
}
let clone_a = action.clone();
let clone_b = action.clone();
let mid_result_c = mid_result.clone();
let hist_a = Vec::from(&history[..half]);
let left_handle = thread::spawn(move || bisect(hist_a, start, mid_result, &clone_a));
let right_handle =
thread::spawn(move || bisect(Vec::from(&history[half + 1..]), mid_result_c, end, &clone_b));
let mut left_results: Vec<Transition> = left_handle
.join()
.expect("left")
.expect("left transition err");
let right_results: Vec<Transition> = right_handle
.join()
.expect("right")
.expect("right transition err");
left_results.extend(right_results); // These results are sorted later...
Ok(left_results)
}
trait ContainerAction: Clone + Send {
fn try_container(&self, container_id: &str) -> String;
fn skip(&self, count: u64) -> ();
}
#[derive(Clone)]
struct DockerContainer {
pb: Arc<ProgressBar>,
command_line: Vec<String>,
timeout_in_seconds: usize,
}
impl DockerContainer {
fn new(total: u64, command_line: Vec<String>, timeout_in_seconds: usize) -> DockerContainer {
let pb = Arc::new(ProgressBar::new(total));
DockerContainer {
pb,
command_line,
timeout_in_seconds,
}
}
}
struct Guard<'a> { buf: &'a mut Vec<u8>, len: usize }
impl<'a> Drop for Guard<'a> {
fn drop(&mut self) {
unsafe { self.buf.set_len(self.len); }
}
}
impl ContainerAction for DockerContainer {
fn try_container(&self, container_id: &str) -> String {
let docker: Docker = Docker::connect_with_defaults().expect("docker daemon running?");
let container_name: String = rand::thread_rng().gen_range(0., 1.3e4).to_string();
//Create container
let mut create = ContainerCreateOptions::new(&container_id);
let mut host_config = ContainerHostConfig::new();
host_config.auto_remove(false);
create.host_config(host_config);
let it = self.command_line.iter();
for command in it {
create.cmd(command.clone());
}
let container: CreateContainerResponse = docker
.create_container(Some(&container_name), &create)
.expect("couldn't create container");
let result = docker.start_container(&container.id);
if result.is_err() {
let err: dockworker::errors::Error = result.unwrap_err();
return format!("{}", err);
}
let log_options = ContainerLogOptions {
stdout: true,
stderr: true,
since: None,
timestamps: None,
tail: None,
follow: true,
};
let timeout = Duration::from_secs(self.timeout_in_seconds as u64);
let mut container_output = String::new();
let now = SystemTime::now();
let timeout_time = now + timeout;
let result = docker.log_container(&container_name, &log_options);
if let Ok(result) = result {
let mut r = result;
let reservation_size = 32;
let mut buf = Vec::<u8>::new();
{
let mut g = Guard { len: buf.len(), buf: &mut buf };
loop {
if g.len == g.buf.len() {
g.buf.resize(g.len + reservation_size, 0);
}
match r.read(&mut g.buf[g.len..]) {
Ok(0) => { break; }
Ok(n) => g.len += n,
Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
Err(_e) => { break; }
}
if SystemTime::now() > timeout_time {
break;
}
}
}
container_output = String::from_utf8_lossy(&buf).to_string();
}
self.pb.inc(1);
let _stop_result = docker.stop_container(&container.id, timeout);
container_output
}
fn skip(&self, count: u64) -> () {
self.pb.inc(count);
}
}
/// Struct to hold parameters.
pub struct BisectOptions {
pub timeout_in_seconds: usize,
pub trunc_size: usize,
}
/// Create containers based on layers and run command_line against them.
/// Result is the differences in std out and std err.
pub fn try_bisect(
histories: &Vec<ImageLayer>,
command_line: Vec<String>,
options: BisectOptions,
) -> Result<Vec<Transition>, Error> {
println!(
"\n{}\n\n{:?}\n",
"Command to apply to layers:".bold(),
&command_line
);
let create_and_try_container = DockerContainer::new(
histories.len() as u64,
command_line,
options.timeout_in_seconds,
);
println!("{}", "Skipped missing layers:".bold());
println!();
let mut layers = Vec::new();
for (index, event) in histories.iter().rev().enumerate() {
let mut created = event.created_by.clone();
created = truncate(&created, options.trunc_size).to_string();
match event.id.clone() {
Some(layer_name) => layers.push(Layer {
height: index,
image_name: layer_name,
creation_command: event.created_by.clone(),
}),
None => println!("{:<3}: {}.", index, truncate(&created, options.trunc_size)),
}
}
println!();
println!(
"{}",
"Bisecting found layers (running command on the layers) ==>\n".bold()
);
if layers.len() < 2 {
println!();
eprintln!(
"{} layers found in cache - not enough layers to bisect.",
layers.len()
);
return Err(Error::new(
std::io::ErrorKind::Other,
"no cached layers found!",
));
}
let results = get_changes(layers, &create_and_try_container);
create_and_try_container.pb.finish_with_message("done");
results
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashMap;
#[derive(Clone)]
struct MapAction {
map: HashMap<String, String>,
}
impl MapAction {
fn new(from: Vec<usize>, to: Vec<&str>) -> Self {
let mut object = MapAction {
map: HashMap::new(),
};
for (f, t) in from.iter().zip(to.iter()) {
object.map.insert(f.to_string(), t.to_string());
}
object
}
}
impl ContainerAction for MapAction {
fn try_container(&self, container_id: &str) -> String {
let none = String::new();
let result: &String = self.map.get(container_id).unwrap_or(&none);
result.clone()
}
fn skip(&self, _count: u64) -> ()
|
}
fn lay(id: usize) -> Layer {
Layer {
height: id,
image_name: id.to_string(),
creation_command: id.to_string(),
}
}
#[test]
fn if_output_always_same_return_earliest_command() {
let results = get_changes(
vec![lay(1), lay(2), lay(3)],
&MapAction::new(vec![1, 2, 3], vec!["A", "A", "A"]),
);
assert_eq!(
results.unwrap(),
vec![Transition {
before: None,
after: LayerResult {
layer: lay(3),
result: "A".to_string()
},
}]
);
}
#[test]
fn if_one_difference_show_command_that_made_difference() {
let results = get_changes(
vec![lay(1), lay(2), lay(3)],
&MapAction::new(vec![1, 2, 3], vec!["A", "A", "B"]),
);
assert_eq!(
results.unwrap(),
vec![Transition {
before: Some(LayerResult {
layer: lay(2),
result: "A".to_string()
}),
after: LayerResult {
layer: lay(3),
result: "B".to_string()
},
}]
);
}
#[test]
fn if_two_differences_show_two_commands_that_made_difference() {
let results = get_changes(
vec![lay(1), lay(2), lay(3), lay(4)],
&MapAction::new(vec![1, 2, 3, 4], vec!["A", "B", "B", "C"]),
);
let res = results.unwrap();
assert_eq!(
res,
vec![
Transition {
before: Some(LayerResult {
layer: lay(1),
result: "A".to_string()
}),
after: LayerResult {
layer: lay(2),
result: "B".to_string()
},
},
Transition {
before: Some(LayerResult {
layer: lay(3),
result: "B".to_string()
}),
after: LayerResult {
layer: lay(4),
result: "C".to_string()
},
}
]
);
}
#[test]
fn three_transitions() {
let results = get_changes(
vec![
lay(1),
lay(2),
lay(3),
lay(4),
lay(5),
lay(6),
lay(7),
lay(8),
lay(9),
lay(10),
],
&MapAction::new(
vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
vec!["A", "B", "B", "C", "C", "C", "C", "C", "D", "D"],
),
);
let res = results.unwrap();
assert_eq!(
res,
vec![
Transition {
before: Some(LayerResult {
layer: lay(1),
result: "A".to_string()
}),
after: LayerResult {
layer: lay(2),
result: "B".to_string()
},
},
Transition {
before: Some(LayerResult {
layer: lay(3),
result: "B".to_string()
}),
after: LayerResult {
layer: lay(4),
result: "C".to_string()
},
},
Transition {
before: Some(LayerResult {
layer: lay(8),
result: "C".to_string()
}),
after: LayerResult {
layer: lay(9),
result: "D".to_string()
},
}
]
);
}
}
|
{}
|
identifier_body
|
lib.rs
|
use std::cell::{Cell, RefCell};
use std::collections::{HashMap, HashSet};
use std::fmt::Debug;
/// `InputCellID` is a unique identifier for an input cell.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct InputCellID(usize);
/// `ComputeCellID` is a unique identifier for a compute cell.
/// Values of type `InputCellID` and `ComputeCellID` should not be mutually assignable,
/// demonstrated by the following tests:
///
/// ```compile_fail
/// let mut r = react::Reactor::new();
/// let input: react::ComputeCellID = r.create_input(111);
/// ```
///
/// ```compile_fail
/// let mut r = react::Reactor::new();
/// let input = r.create_input(111);
/// let compute: react::InputCellID = r.create_compute(&[react::CellID::Input(input)], |_| 222).unwrap();
/// ```
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ComputeCellID(usize);
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct CallbackID(usize);
pub type Callback<'reactor, T> = RefCell<Box<dyn'reactor + FnMut(T)>>;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum CellID {
Input(InputCellID),
Compute(ComputeCellID),
}
#[derive(Debug, PartialEq)]
pub enum RemoveCallbackError {
NonexistentCell,
NonexistentCallback,
}
struct InputCell<T> {
clients: HashSet<ComputeCellID>,
value: T,
}
impl<T: Copy + Debug + PartialEq> InputCell<T> {
pub fn new(init: T) -> Self {
InputCell {
clients: HashSet::new(),
value: init,
}
}
}
struct ComputeCell<'r, T: Debug> {
fun: Box<dyn 'r + Fn(&[T]) -> T>,
deps: Vec<CellID>,
callbacks: HashMap<CallbackID, Callback<'r, T>>,
prev_val: Cell<Option<T>>,
next_cbid: usize, // increases monotonically; increments on adding a callback
clients: HashSet<ComputeCellID>,
}
impl<'r, T: Copy + Debug + PartialEq + 'r> ComputeCell<'r, T> {
pub fn new<F>(fun: F, deps: &[CellID]) -> Self
where
F: 'r + Fn(&[T]) -> T,
{
ComputeCell {
fun: Box::new(fun),
deps: deps.to_vec(),
callbacks: HashMap::new(),
prev_val: Cell::new(None),
next_cbid: 0,
clients: HashSet::new(),
}
}
pub fn call(&self, reactor: &Reactor<'r, T>) -> T {
let deps = self
.deps
.iter()
.map(|c| reactor.value(*c).unwrap())
.collect::<Vec<T>>();
let nv = (self.fun)(&deps);
let mut fire_callbacks = false;
if let Some(pv) = self.prev_val.get() {
if nv!= pv {
self.prev_val.set(Some(nv));
fire_callbacks = true;
}
} else {
self.prev_val.set(Some(nv));
fire_callbacks = true;
}
if fire_callbacks {
for c in self.callbacks.values() {
(&mut *c.borrow_mut())(nv);
}
}
nv
}
}
#[derive(Default)]
pub struct Reactor<'r, T: Debug> {
input_cells: Vec<InputCell<T>>,
compute_cells: Vec<ComputeCell<'r, T>>,
}
// You are guaranteed that Reactor will only be tested against types that are Copy + PartialEq.
impl<'r, T: Copy + Debug + PartialEq + 'r> Reactor<'r, T> {
pub fn new() -> Self {
Reactor {
input_cells: Vec::new(),
compute_cells: Vec::new(),
}
}
// Creates an input cell with the specified initial value, returning its ID.
pub fn create_input(&mut self, initial: T) -> InputCellID {
let idx = self.input_cells.len();
let id = InputCellID(idx);
self.input_cells.push(InputCell::new(initial));
id
}
// Creates a compute cell with the specified dependencies and compute function.
// The compute function is expected to take in its arguments in the same order as specified in
// `dependencies`.
// You do not need to reject compute functions that expect more arguments than there are
// dependencies (how would you check for this, anyway?).
//
// If any dependency doesn't exist, returns an Err with that nonexistent dependency.
// (If multiple dependencies do not exist, exactly which one is returned is not defined and
// will not be tested)
//
// Notice that there is no way to *remove* a cell.
// This means that you may assume, without checking, that if the dependencies exist at creation
// time they will continue to exist as long as the Reactor exists.
pub fn create_compute<F>(
&mut self,
dependencies: &[CellID],
compute_func: F,
) -> Result<ComputeCellID, CellID>
where
F: 'r + Fn(&[T]) -> T,
{
let cidx = self.compute_cells.len();
let cid = ComputeCellID(cidx);
for id in dependencies.iter() {
match id {
CellID::Input(InputCellID(idx)) => {
if *idx >= self.input_cells.len() {
return Err(*id);
}
}
CellID::Compute(ComputeCellID(idx)) => {
if *idx >= self.compute_cells.len() {
return Err(*id);
}
}
}
}
// register as clients with all dependencies.
for id in dependencies.iter() {
match id {
CellID::Input(InputCellID(idx)) => {
let _ = self.input_cells[*idx].clients.insert(cid);
}
CellID::Compute(ComputeCellID(idx)) => {
let _ = self.compute_cells[*idx].clients.insert(cid);
}
}
}
let cell = ComputeCell::new(compute_func, dependencies);
cell.call(&self); // set the initial value
self.compute_cells.push(cell);
Ok(cid)
}
// Retrieves the current value of the cell, or None if the cell does not exist.
//
// You may wonder whether it is possible to implement `get(&self, id: CellID) -> Option<&Cell>`
// and have a `value(&self)` method on `Cell`.
//
// It turns out this introduces a significant amount of extra complexity to this exercise.
// We chose not to cover this here, since this exercise is probably enough work as-is.
pub fn value(&self, id: CellID) -> Option<T> {
match id {
CellID::Input(InputCellID(idx)) => self.input_cells.get(idx).map(|i| i.value),
CellID::Compute(ComputeCellID(idx)) => {
if let Some(cell) = self.compute_cells.get(idx) {
Some(cell.call(&self))
} else {
None
}
}
|
}
// Sets the value of the specified input cell.
//
// Returns false if the cell does not exist.
//
// Similarly, you may wonder about `get_mut(&mut self, id: CellID) -> Option<&mut Cell>`, with
// a `set_value(&mut self, new_value: T)` method on `Cell`.
//
// As before, that turned out to add too much extra complexity.
pub fn set_value(&mut self, id: InputCellID, new_value: T) -> bool {
let InputCellID(idx) = id;
if idx < self.input_cells.len() {
let old_value = self.input_cells[idx].value;
if old_value == new_value {
return true;
}
self.input_cells[idx].value = new_value;
let mut clients1 = self.input_cells[idx].clients.clone();
let mut clients2 = HashSet::new();
let mut done = false;
// Recursively iterate through all clients until we've converged on the
// the stable set of them. Does at least N extra checks, where N is
// the numer of ultimate clients.
while!done {
for client in clients1.iter() {
clients2.insert(client.clone());
let ComputeCellID(idx) = client;
let cell = &self.compute_cells[*idx];
// first find all the clients that will be called without us
clients2.extend(cell.clients.iter());
}
for client in clients2.iter() {
let ComputeCellID(idx) = client;
let cell = &self.compute_cells[*idx];
clients1.extend(cell.clients.iter());
}
done = clients1 == clients2;
}
// This has the potential to call more clients than needed, but ComputeCells
// cache their previous value and only invoke their callbacks on change,
// so client callbacks won't get invoked more than once.
//
// There's an implicit assumption here that each ComputeCell's function is
// cheap to run, which is probably not true in general. We could do a
// topological sort of the client graph to ensure we only call leaf nodes.
for client in clients1 {
let ComputeCellID(idx) = client;
let cell = &self.compute_cells[idx];
cell.call(&self);
}
// we have set a new value and called all clients, return true
true
} else {
// the new value was the same as the old value, return false
false
}
}
// Adds a callback to the specified compute cell.
//
// Returns the ID of the just-added callback, or None if the cell doesn't exist.
//
// Callbacks on input cells will not be tested.
//
// The semantics of callbacks (as will be tested):
// For a single set_value call, each compute cell's callbacks should each be called:
// * Zero times if the compute cell's value did not change as a result of the set_value call.
// * Exactly once if the compute cell's value changed as a result of the set_value call.
// The value passed to the callback should be the final value of the compute cell after the
// set_value call.
pub fn add_callback<F: 'r + FnMut(T) -> ()>(
&mut self,
id: ComputeCellID,
callback: F,
) -> Option<CallbackID> {
let ComputeCellID(idx) = id;
if idx >= self.compute_cells.len() {
return None;
}
let cidx = self.compute_cells[idx].next_cbid.to_owned();
self.compute_cells[idx].next_cbid += 1;
let cid = CallbackID(cidx);
self.compute_cells[idx]
.callbacks
.insert(cid, RefCell::new(Box::new(callback)));
Some(cid)
}
// Removes the specified callback, using an ID returned from add_callback.
//
// Returns an Err if either the cell or callback does not exist.
//
// A removed callback should no longer be called.
pub fn remove_callback(
&mut self,
cell: ComputeCellID,
callback: CallbackID,
) -> Result<(), RemoveCallbackError> {
let ComputeCellID(idx) = cell;
if let Some(compute_cell) = self.compute_cells.get_mut(idx) {
if compute_cell.callbacks.remove(&callback).is_some() {
return Ok(());
} else {
return Err(RemoveCallbackError::NonexistentCallback);
}
} else {
Err(RemoveCallbackError::NonexistentCell)
}
}
}
|
}
|
random_line_split
|
lib.rs
|
use std::cell::{Cell, RefCell};
use std::collections::{HashMap, HashSet};
use std::fmt::Debug;
/// `InputCellID` is a unique identifier for an input cell.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct InputCellID(usize);
/// `ComputeCellID` is a unique identifier for a compute cell.
/// Values of type `InputCellID` and `ComputeCellID` should not be mutually assignable,
/// demonstrated by the following tests:
///
/// ```compile_fail
/// let mut r = react::Reactor::new();
/// let input: react::ComputeCellID = r.create_input(111);
/// ```
///
/// ```compile_fail
/// let mut r = react::Reactor::new();
/// let input = r.create_input(111);
/// let compute: react::InputCellID = r.create_compute(&[react::CellID::Input(input)], |_| 222).unwrap();
/// ```
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ComputeCellID(usize);
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct CallbackID(usize);
pub type Callback<'reactor, T> = RefCell<Box<dyn'reactor + FnMut(T)>>;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum CellID {
Input(InputCellID),
Compute(ComputeCellID),
}
#[derive(Debug, PartialEq)]
pub enum RemoveCallbackError {
NonexistentCell,
NonexistentCallback,
}
struct InputCell<T> {
clients: HashSet<ComputeCellID>,
value: T,
}
impl<T: Copy + Debug + PartialEq> InputCell<T> {
pub fn new(init: T) -> Self
|
}
struct ComputeCell<'r, T: Debug> {
fun: Box<dyn 'r + Fn(&[T]) -> T>,
deps: Vec<CellID>,
callbacks: HashMap<CallbackID, Callback<'r, T>>,
prev_val: Cell<Option<T>>,
next_cbid: usize, // increases monotonically; increments on adding a callback
clients: HashSet<ComputeCellID>,
}
impl<'r, T: Copy + Debug + PartialEq + 'r> ComputeCell<'r, T> {
pub fn new<F>(fun: F, deps: &[CellID]) -> Self
where
F: 'r + Fn(&[T]) -> T,
{
ComputeCell {
fun: Box::new(fun),
deps: deps.to_vec(),
callbacks: HashMap::new(),
prev_val: Cell::new(None),
next_cbid: 0,
clients: HashSet::new(),
}
}
pub fn call(&self, reactor: &Reactor<'r, T>) -> T {
let deps = self
.deps
.iter()
.map(|c| reactor.value(*c).unwrap())
.collect::<Vec<T>>();
let nv = (self.fun)(&deps);
let mut fire_callbacks = false;
if let Some(pv) = self.prev_val.get() {
if nv!= pv {
self.prev_val.set(Some(nv));
fire_callbacks = true;
}
} else {
self.prev_val.set(Some(nv));
fire_callbacks = true;
}
if fire_callbacks {
for c in self.callbacks.values() {
(&mut *c.borrow_mut())(nv);
}
}
nv
}
}
#[derive(Default)]
pub struct Reactor<'r, T: Debug> {
input_cells: Vec<InputCell<T>>,
compute_cells: Vec<ComputeCell<'r, T>>,
}
// You are guaranteed that Reactor will only be tested against types that are Copy + PartialEq.
impl<'r, T: Copy + Debug + PartialEq + 'r> Reactor<'r, T> {
pub fn new() -> Self {
Reactor {
input_cells: Vec::new(),
compute_cells: Vec::new(),
}
}
// Creates an input cell with the specified initial value, returning its ID.
pub fn create_input(&mut self, initial: T) -> InputCellID {
let idx = self.input_cells.len();
let id = InputCellID(idx);
self.input_cells.push(InputCell::new(initial));
id
}
// Creates a compute cell with the specified dependencies and compute function.
// The compute function is expected to take in its arguments in the same order as specified in
// `dependencies`.
// You do not need to reject compute functions that expect more arguments than there are
// dependencies (how would you check for this, anyway?).
//
// If any dependency doesn't exist, returns an Err with that nonexistent dependency.
// (If multiple dependencies do not exist, exactly which one is returned is not defined and
// will not be tested)
//
// Notice that there is no way to *remove* a cell.
// This means that you may assume, without checking, that if the dependencies exist at creation
// time they will continue to exist as long as the Reactor exists.
pub fn create_compute<F>(
&mut self,
dependencies: &[CellID],
compute_func: F,
) -> Result<ComputeCellID, CellID>
where
F: 'r + Fn(&[T]) -> T,
{
let cidx = self.compute_cells.len();
let cid = ComputeCellID(cidx);
for id in dependencies.iter() {
match id {
CellID::Input(InputCellID(idx)) => {
if *idx >= self.input_cells.len() {
return Err(*id);
}
}
CellID::Compute(ComputeCellID(idx)) => {
if *idx >= self.compute_cells.len() {
return Err(*id);
}
}
}
}
// register as clients with all dependencies.
for id in dependencies.iter() {
match id {
CellID::Input(InputCellID(idx)) => {
let _ = self.input_cells[*idx].clients.insert(cid);
}
CellID::Compute(ComputeCellID(idx)) => {
let _ = self.compute_cells[*idx].clients.insert(cid);
}
}
}
let cell = ComputeCell::new(compute_func, dependencies);
cell.call(&self); // set the initial value
self.compute_cells.push(cell);
Ok(cid)
}
// Retrieves the current value of the cell, or None if the cell does not exist.
//
// You may wonder whether it is possible to implement `get(&self, id: CellID) -> Option<&Cell>`
// and have a `value(&self)` method on `Cell`.
//
// It turns out this introduces a significant amount of extra complexity to this exercise.
// We chose not to cover this here, since this exercise is probably enough work as-is.
pub fn value(&self, id: CellID) -> Option<T> {
match id {
CellID::Input(InputCellID(idx)) => self.input_cells.get(idx).map(|i| i.value),
CellID::Compute(ComputeCellID(idx)) => {
if let Some(cell) = self.compute_cells.get(idx) {
Some(cell.call(&self))
} else {
None
}
}
}
}
// Sets the value of the specified input cell.
//
// Returns false if the cell does not exist.
//
// Similarly, you may wonder about `get_mut(&mut self, id: CellID) -> Option<&mut Cell>`, with
// a `set_value(&mut self, new_value: T)` method on `Cell`.
//
// As before, that turned out to add too much extra complexity.
pub fn set_value(&mut self, id: InputCellID, new_value: T) -> bool {
let InputCellID(idx) = id;
if idx < self.input_cells.len() {
let old_value = self.input_cells[idx].value;
if old_value == new_value {
return true;
}
self.input_cells[idx].value = new_value;
let mut clients1 = self.input_cells[idx].clients.clone();
let mut clients2 = HashSet::new();
let mut done = false;
// Recursively iterate through all clients until we've converged on the
// the stable set of them. Does at least N extra checks, where N is
// the numer of ultimate clients.
while!done {
for client in clients1.iter() {
clients2.insert(client.clone());
let ComputeCellID(idx) = client;
let cell = &self.compute_cells[*idx];
// first find all the clients that will be called without us
clients2.extend(cell.clients.iter());
}
for client in clients2.iter() {
let ComputeCellID(idx) = client;
let cell = &self.compute_cells[*idx];
clients1.extend(cell.clients.iter());
}
done = clients1 == clients2;
}
// This has the potential to call more clients than needed, but ComputeCells
// cache their previous value and only invoke their callbacks on change,
// so client callbacks won't get invoked more than once.
//
// There's an implicit assumption here that each ComputeCell's function is
// cheap to run, which is probably not true in general. We could do a
// topological sort of the client graph to ensure we only call leaf nodes.
for client in clients1 {
let ComputeCellID(idx) = client;
let cell = &self.compute_cells[idx];
cell.call(&self);
}
// we have set a new value and called all clients, return true
true
} else {
// the new value was the same as the old value, return false
false
}
}
// Adds a callback to the specified compute cell.
//
// Returns the ID of the just-added callback, or None if the cell doesn't exist.
//
// Callbacks on input cells will not be tested.
//
// The semantics of callbacks (as will be tested):
// For a single set_value call, each compute cell's callbacks should each be called:
// * Zero times if the compute cell's value did not change as a result of the set_value call.
// * Exactly once if the compute cell's value changed as a result of the set_value call.
// The value passed to the callback should be the final value of the compute cell after the
// set_value call.
pub fn add_callback<F: 'r + FnMut(T) -> ()>(
&mut self,
id: ComputeCellID,
callback: F,
) -> Option<CallbackID> {
let ComputeCellID(idx) = id;
if idx >= self.compute_cells.len() {
return None;
}
let cidx = self.compute_cells[idx].next_cbid.to_owned();
self.compute_cells[idx].next_cbid += 1;
let cid = CallbackID(cidx);
self.compute_cells[idx]
.callbacks
.insert(cid, RefCell::new(Box::new(callback)));
Some(cid)
}
// Removes the specified callback, using an ID returned from add_callback.
//
// Returns an Err if either the cell or callback does not exist.
//
// A removed callback should no longer be called.
pub fn remove_callback(
&mut self,
cell: ComputeCellID,
callback: CallbackID,
) -> Result<(), RemoveCallbackError> {
let ComputeCellID(idx) = cell;
if let Some(compute_cell) = self.compute_cells.get_mut(idx) {
if compute_cell.callbacks.remove(&callback).is_some() {
return Ok(());
} else {
return Err(RemoveCallbackError::NonexistentCallback);
}
} else {
Err(RemoveCallbackError::NonexistentCell)
}
}
}
|
{
InputCell {
clients: HashSet::new(),
value: init,
}
}
|
identifier_body
|
lib.rs
|
use std::cell::{Cell, RefCell};
use std::collections::{HashMap, HashSet};
use std::fmt::Debug;
/// `InputCellID` is a unique identifier for an input cell.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct InputCellID(usize);
/// `ComputeCellID` is a unique identifier for a compute cell.
/// Values of type `InputCellID` and `ComputeCellID` should not be mutually assignable,
/// demonstrated by the following tests:
///
/// ```compile_fail
/// let mut r = react::Reactor::new();
/// let input: react::ComputeCellID = r.create_input(111);
/// ```
///
/// ```compile_fail
/// let mut r = react::Reactor::new();
/// let input = r.create_input(111);
/// let compute: react::InputCellID = r.create_compute(&[react::CellID::Input(input)], |_| 222).unwrap();
/// ```
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ComputeCellID(usize);
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct CallbackID(usize);
pub type Callback<'reactor, T> = RefCell<Box<dyn'reactor + FnMut(T)>>;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum CellID {
Input(InputCellID),
Compute(ComputeCellID),
}
#[derive(Debug, PartialEq)]
pub enum RemoveCallbackError {
NonexistentCell,
NonexistentCallback,
}
struct InputCell<T> {
clients: HashSet<ComputeCellID>,
value: T,
}
impl<T: Copy + Debug + PartialEq> InputCell<T> {
pub fn new(init: T) -> Self {
InputCell {
clients: HashSet::new(),
value: init,
}
}
}
struct ComputeCell<'r, T: Debug> {
fun: Box<dyn 'r + Fn(&[T]) -> T>,
deps: Vec<CellID>,
callbacks: HashMap<CallbackID, Callback<'r, T>>,
prev_val: Cell<Option<T>>,
next_cbid: usize, // increases monotonically; increments on adding a callback
clients: HashSet<ComputeCellID>,
}
impl<'r, T: Copy + Debug + PartialEq + 'r> ComputeCell<'r, T> {
pub fn new<F>(fun: F, deps: &[CellID]) -> Self
where
F: 'r + Fn(&[T]) -> T,
{
ComputeCell {
fun: Box::new(fun),
deps: deps.to_vec(),
callbacks: HashMap::new(),
prev_val: Cell::new(None),
next_cbid: 0,
clients: HashSet::new(),
}
}
pub fn call(&self, reactor: &Reactor<'r, T>) -> T {
let deps = self
.deps
.iter()
.map(|c| reactor.value(*c).unwrap())
.collect::<Vec<T>>();
let nv = (self.fun)(&deps);
let mut fire_callbacks = false;
if let Some(pv) = self.prev_val.get() {
if nv!= pv {
self.prev_val.set(Some(nv));
fire_callbacks = true;
}
} else {
self.prev_val.set(Some(nv));
fire_callbacks = true;
}
if fire_callbacks {
for c in self.callbacks.values() {
(&mut *c.borrow_mut())(nv);
}
}
nv
}
}
#[derive(Default)]
pub struct Reactor<'r, T: Debug> {
input_cells: Vec<InputCell<T>>,
compute_cells: Vec<ComputeCell<'r, T>>,
}
// You are guaranteed that Reactor will only be tested against types that are Copy + PartialEq.
impl<'r, T: Copy + Debug + PartialEq + 'r> Reactor<'r, T> {
pub fn
|
() -> Self {
Reactor {
input_cells: Vec::new(),
compute_cells: Vec::new(),
}
}
// Creates an input cell with the specified initial value, returning its ID.
pub fn create_input(&mut self, initial: T) -> InputCellID {
let idx = self.input_cells.len();
let id = InputCellID(idx);
self.input_cells.push(InputCell::new(initial));
id
}
// Creates a compute cell with the specified dependencies and compute function.
// The compute function is expected to take in its arguments in the same order as specified in
// `dependencies`.
// You do not need to reject compute functions that expect more arguments than there are
// dependencies (how would you check for this, anyway?).
//
// If any dependency doesn't exist, returns an Err with that nonexistent dependency.
// (If multiple dependencies do not exist, exactly which one is returned is not defined and
// will not be tested)
//
// Notice that there is no way to *remove* a cell.
// This means that you may assume, without checking, that if the dependencies exist at creation
// time they will continue to exist as long as the Reactor exists.
pub fn create_compute<F>(
&mut self,
dependencies: &[CellID],
compute_func: F,
) -> Result<ComputeCellID, CellID>
where
F: 'r + Fn(&[T]) -> T,
{
let cidx = self.compute_cells.len();
let cid = ComputeCellID(cidx);
for id in dependencies.iter() {
match id {
CellID::Input(InputCellID(idx)) => {
if *idx >= self.input_cells.len() {
return Err(*id);
}
}
CellID::Compute(ComputeCellID(idx)) => {
if *idx >= self.compute_cells.len() {
return Err(*id);
}
}
}
}
// register as clients with all dependencies.
for id in dependencies.iter() {
match id {
CellID::Input(InputCellID(idx)) => {
let _ = self.input_cells[*idx].clients.insert(cid);
}
CellID::Compute(ComputeCellID(idx)) => {
let _ = self.compute_cells[*idx].clients.insert(cid);
}
}
}
let cell = ComputeCell::new(compute_func, dependencies);
cell.call(&self); // set the initial value
self.compute_cells.push(cell);
Ok(cid)
}
// Retrieves the current value of the cell, or None if the cell does not exist.
//
// You may wonder whether it is possible to implement `get(&self, id: CellID) -> Option<&Cell>`
// and have a `value(&self)` method on `Cell`.
//
// It turns out this introduces a significant amount of extra complexity to this exercise.
// We chose not to cover this here, since this exercise is probably enough work as-is.
pub fn value(&self, id: CellID) -> Option<T> {
match id {
CellID::Input(InputCellID(idx)) => self.input_cells.get(idx).map(|i| i.value),
CellID::Compute(ComputeCellID(idx)) => {
if let Some(cell) = self.compute_cells.get(idx) {
Some(cell.call(&self))
} else {
None
}
}
}
}
// Sets the value of the specified input cell.
//
// Returns false if the cell does not exist.
//
// Similarly, you may wonder about `get_mut(&mut self, id: CellID) -> Option<&mut Cell>`, with
// a `set_value(&mut self, new_value: T)` method on `Cell`.
//
// As before, that turned out to add too much extra complexity.
pub fn set_value(&mut self, id: InputCellID, new_value: T) -> bool {
let InputCellID(idx) = id;
if idx < self.input_cells.len() {
let old_value = self.input_cells[idx].value;
if old_value == new_value {
return true;
}
self.input_cells[idx].value = new_value;
let mut clients1 = self.input_cells[idx].clients.clone();
let mut clients2 = HashSet::new();
let mut done = false;
// Recursively iterate through all clients until we've converged on the
// the stable set of them. Does at least N extra checks, where N is
// the numer of ultimate clients.
while!done {
for client in clients1.iter() {
clients2.insert(client.clone());
let ComputeCellID(idx) = client;
let cell = &self.compute_cells[*idx];
// first find all the clients that will be called without us
clients2.extend(cell.clients.iter());
}
for client in clients2.iter() {
let ComputeCellID(idx) = client;
let cell = &self.compute_cells[*idx];
clients1.extend(cell.clients.iter());
}
done = clients1 == clients2;
}
// This has the potential to call more clients than needed, but ComputeCells
// cache their previous value and only invoke their callbacks on change,
// so client callbacks won't get invoked more than once.
//
// There's an implicit assumption here that each ComputeCell's function is
// cheap to run, which is probably not true in general. We could do a
// topological sort of the client graph to ensure we only call leaf nodes.
for client in clients1 {
let ComputeCellID(idx) = client;
let cell = &self.compute_cells[idx];
cell.call(&self);
}
// we have set a new value and called all clients, return true
true
} else {
// the new value was the same as the old value, return false
false
}
}
// Adds a callback to the specified compute cell.
//
// Returns the ID of the just-added callback, or None if the cell doesn't exist.
//
// Callbacks on input cells will not be tested.
//
// The semantics of callbacks (as will be tested):
// For a single set_value call, each compute cell's callbacks should each be called:
// * Zero times if the compute cell's value did not change as a result of the set_value call.
// * Exactly once if the compute cell's value changed as a result of the set_value call.
// The value passed to the callback should be the final value of the compute cell after the
// set_value call.
pub fn add_callback<F: 'r + FnMut(T) -> ()>(
&mut self,
id: ComputeCellID,
callback: F,
) -> Option<CallbackID> {
let ComputeCellID(idx) = id;
if idx >= self.compute_cells.len() {
return None;
}
let cidx = self.compute_cells[idx].next_cbid.to_owned();
self.compute_cells[idx].next_cbid += 1;
let cid = CallbackID(cidx);
self.compute_cells[idx]
.callbacks
.insert(cid, RefCell::new(Box::new(callback)));
Some(cid)
}
// Removes the specified callback, using an ID returned from add_callback.
//
// Returns an Err if either the cell or callback does not exist.
//
// A removed callback should no longer be called.
pub fn remove_callback(
&mut self,
cell: ComputeCellID,
callback: CallbackID,
) -> Result<(), RemoveCallbackError> {
let ComputeCellID(idx) = cell;
if let Some(compute_cell) = self.compute_cells.get_mut(idx) {
if compute_cell.callbacks.remove(&callback).is_some() {
return Ok(());
} else {
return Err(RemoveCallbackError::NonexistentCallback);
}
} else {
Err(RemoveCallbackError::NonexistentCell)
}
}
}
|
new
|
identifier_name
|
wasitests.rs
|
//! This file will run at build time to autogenerate the WASI regression tests
//! It will compile the files indicated in TESTS, to:executable and.wasm
//! - Compile with the native rust target to get the expected output
//! - Compile with the latest WASI target to get the wasm
//! - Generate the test that will compare the output of running the.wasm file
//! with wasmer with the expected output
use glob::glob;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::io;
use std::io::prelude::*;
|
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct NativeOutput {
stdout: String,
stderr: String,
result: i64,
}
/// Compile and execute the test file as native code, saving the results to be
/// compared against later.
///
/// This function attempts to clean up its output after it executes it.
fn generate_native_output(
temp_dir: &Path,
file: &str,
normalized_name: &str,
args: &[String],
options: &WasiOptions,
) -> io::Result<NativeOutput> {
let executable_path = temp_dir.join(normalized_name);
println!(
"Compiling program {} to native at {}",
file,
executable_path.to_string_lossy()
);
let native_out = Command::new("rustc")
.arg(file)
.arg("-o")
.args(args)
.arg(&executable_path)
.output()
.expect("Failed to compile program to native code");
util::print_info_on_error(&native_out, "COMPILATION FAILED");
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let mut perm = executable_path
.metadata()
.expect("native executable")
.permissions();
perm.set_mode(0o766);
println!(
"Setting execute permissions on {}",
executable_path.to_string_lossy()
);
fs::set_permissions(&executable_path, perm)?;
}
println!(
"Executing native program at {}",
executable_path.to_string_lossy()
);
// workspace root
const EXECUTE_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi");
let mut native_command = Command::new(&executable_path)
.current_dir(EXECUTE_DIR)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap();
if let Some(stdin_str) = &options.stdin {
write!(native_command.stdin.as_ref().unwrap(), "{}", stdin_str).unwrap();
}
let result = native_command
.wait()
.expect("Failed to execute native program");
let stdout_str = {
let mut stdout = native_command.stdout.unwrap();
let mut s = String::new();
stdout.read_to_string(&mut s).unwrap();
s
};
let stderr_str = {
let mut stderr = native_command.stderr.unwrap();
let mut s = String::new();
stderr.read_to_string(&mut s).unwrap();
s
};
if!result.success() {
println!("NATIVE PROGRAM FAILED");
println!("stdout:\n{}", stdout_str);
eprintln!("stderr:\n{}", stderr_str);
}
let result = result.code().unwrap() as i64;
Ok(NativeOutput {
stdout: stdout_str,
stderr: stderr_str,
result,
})
}
/// compile the Wasm file for the given version of WASI
///
/// returns the path of where the wasm file is
fn compile_wasm_for_version(
temp_dir: &Path,
file: &str,
out_dir: &Path,
rs_mod_name: &str,
version: WasiVersion,
) -> io::Result<PathBuf> {
//let out_dir = base_dir; //base_dir.join("..").join(version.get_directory_name());
if!out_dir.exists() {
fs::create_dir(out_dir)?;
}
let wasm_out_name = {
let mut wasm_out_name = out_dir.join(rs_mod_name);
wasm_out_name.set_extension("wasm");
wasm_out_name
};
println!("Reading contents from file `{}`", file);
let file_contents: String = {
let mut fc = String::new();
let mut f = fs::OpenOptions::new().read(true).open(file)?;
f.read_to_string(&mut fc)?;
fc
};
let temp_wasi_rs_file_name = temp_dir.join(format!("wasi_modified_version_{}.rs", rs_mod_name));
{
let mut actual_file = fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(&temp_wasi_rs_file_name)
.unwrap();
actual_file.write_all(file_contents.as_bytes()).unwrap();
}
println!(
"Compiling wasm module `{}` with toolchain `{}`",
&wasm_out_name.to_string_lossy(),
version.get_compiler_toolchain()
);
let mut command = Command::new("rustc");
command
.arg(format!("+{}", version.get_compiler_toolchain()))
.arg("--target=wasm32-wasi")
.arg("-C")
.arg("opt-level=z")
.arg(&temp_wasi_rs_file_name)
.arg("-o")
.arg(&wasm_out_name);
println!("Command {:?}", command);
let wasm_compilation_out = command.output().expect("Failed to compile program to wasm");
util::print_info_on_error(&wasm_compilation_out, "WASM COMPILATION");
println!(
"Removing file `{}`",
&temp_wasi_rs_file_name.to_string_lossy()
);
// to prevent commiting huge binary blobs forever
let wasm_strip_out = Command::new("wasm-strip")
.arg(&wasm_out_name)
.output()
.expect("Failed to strip compiled wasm module");
util::print_info_on_error(&wasm_strip_out, "STRIPPING WASM");
let wasm_opt_out = Command::new("wasm-opt")
.arg("-Oz")
.arg(&wasm_out_name)
.arg("-o")
.arg(&wasm_out_name)
.output()
.expect("Failed to optimize compiled wasm module with wasm-opt!");
util::print_info_on_error(&wasm_opt_out, "OPTIMIZING WASM");
Ok(wasm_out_name)
}
/// Returns the a Vec of the test modules created
fn compile(temp_dir: &Path, file: &str, wasi_versions: &[WasiVersion]) {
let src_code: String = fs::read_to_string(file).unwrap();
let options: WasiOptions = extract_args_from_source_file(&src_code).unwrap_or_default();
assert!(file.ends_with(".rs"));
let rs_mod_name = {
Path::new(&file.to_lowercase())
.file_stem()
.unwrap()
.to_string_lossy()
.to_string()
};
let base_dir = Path::new(file).parent().unwrap();
let NativeOutput {
stdout,
stderr,
result,
} = generate_native_output(temp_dir, file, &rs_mod_name, &options.args, &options)
.expect("Generate native output");
let test = WasiTest {
wasm_prog_name: format!("{}.wasm", rs_mod_name),
stdout,
stderr,
result,
options,
};
let test_serialized = test.into_wasi_wast();
println!("Generated test output: {}", &test_serialized);
wasi_versions
.iter()
.map(|&version| {
let out_dir = base_dir.join("..").join(version.get_directory_name());
if!out_dir.exists() {
fs::create_dir(&out_dir).unwrap();
}
let wasm_out_name = {
let mut wasm_out_name = out_dir.join(rs_mod_name.clone());
wasm_out_name.set_extension("wast");
wasm_out_name
};
println!("Writing test output to {}", wasm_out_name.to_string_lossy());
fs::write(&wasm_out_name, test_serialized.clone()).unwrap();
println!("Compiling wasm version {:?}", version);
compile_wasm_for_version(temp_dir, file, &out_dir, &rs_mod_name, version)
.unwrap_or_else(|_| panic!("Could not compile Wasm to WASI version {:?}, perhaps you need to install the `{}` rust toolchain", version, version.get_compiler_toolchain()));
}).for_each(drop); // Do nothing with it, but let the iterator be consumed/iterated.
}
const WASI_TEST_SRC_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi/tests/*.rs");
pub fn build(wasi_versions: &[WasiVersion], specific_tests: &[&str]) {
let temp_dir = tempfile::TempDir::new().unwrap();
for entry in glob(WASI_TEST_SRC_DIR).unwrap() {
match entry {
Ok(path) => {
let test = path.to_str().unwrap();
if!specific_tests.is_empty() {
if let Some(filename) = path.file_stem().and_then(|f| f.to_str()) {
if specific_tests.contains(&filename) {
compile(temp_dir.path(), test, wasi_versions);
}
}
} else {
compile(temp_dir.path(), test, wasi_versions);
}
}
Err(e) => println!("{:?}", e),
}
}
println!("All modules generated.");
}
/// This is the structure of the `.wast` file
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct WasiTest {
/// The name of the wasm module to run
pub wasm_prog_name: String,
/// The program expected output on stdout
pub stdout: String,
/// The program expected output on stderr
pub stderr: String,
/// The program expected result
pub result: i64,
/// The program options
pub options: WasiOptions,
}
impl WasiTest {
fn into_wasi_wast(self) -> String {
use std::fmt::Write;
let mut out = format!(
";; This file was generated by https://github.com/wasmerio/wasi-tests\n
(wasi_test \"{}\"",
self.wasm_prog_name
);
if!self.options.env.is_empty() {
let envs = self
.options
.env
.iter()
.map(|(name, value)| format!("\"{}={}\"", name, value))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (envs {})", envs);
}
if!self.options.args.is_empty() {
let args = self
.options
.args
.iter()
.map(|v| format!("\"{}\"", v))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (args {})", args);
}
if!self.options.dir.is_empty() {
let preopens = self
.options
.dir
.iter()
.map(|v| format!("\"{}\"", v))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (preopens {})", preopens);
}
if!self.options.mapdir.is_empty() {
let map_dirs = self
.options
.mapdir
.iter()
.map(|(a, b)| format!("\"{}:{}\"", a, b))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (map_dirs {})", map_dirs);
}
if!self.options.tempdir.is_empty() {
let temp_dirs = self
.options
.tempdir
.iter()
.map(|td| format!("\"{}\"", td))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (temp_dirs {})", temp_dirs);
}
let _ = write!(out, "\n (assert_return (i64.const {}))", self.result);
if let Some(stdin) = &self.options.stdin {
let _ = write!(out, "\n (stdin {:?})", stdin);
}
if!self.stdout.is_empty() {
let _ = write!(out, "\n (assert_stdout {:?})", self.stdout);
}
if!self.stderr.is_empty() {
let _ = write!(out, "\n (assert_stderr {:?})", self.stderr);
}
let _ = write!(out, "\n)\n");
out
}
}
/// The options provied when executed a WASI Wasm program
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct WasiOptions {
/// Mapped pre-opened dirs
pub mapdir: Vec<(String, String)>,
/// Environment vars
pub env: Vec<(String, String)>,
/// Program arguments
pub args: Vec<String>,
/// Pre-opened directories
pub dir: Vec<String>,
/// The alias of the temporary directory to use
pub tempdir: Vec<String>,
/// Stdin to give to the native program and WASI program.
pub stdin: Option<String>,
}
/// Pulls args to the program out of a comment at the top of the file starting with "// WasiOptions:"
fn extract_args_from_source_file(source_code: &str) -> Option<WasiOptions> {
if source_code.starts_with("// WASI:") {
let mut args = WasiOptions::default();
for arg_line in source_code
.lines()
.skip(1)
.take_while(|line| line.starts_with("// "))
{
let arg_line = arg_line.strip_prefix("// ").unwrap();
let arg_line = arg_line.trim();
let colon_idx = arg_line
.find(':')
.expect("directives provided at the top must be separated by a `:`");
let (command_name, value) = arg_line.split_at(colon_idx);
let value = value.strip_prefix(':').unwrap();
let value = value.trim();
match command_name {
"mapdir" =>
// We try first splitting by `::`
{
if let [alias, real_dir] = value.split("::").collect::<Vec<&str>>()[..] {
args.mapdir.push((alias.to_string(), real_dir.to_string()));
} else if let [alias, real_dir] = value.split(':').collect::<Vec<&str>>()[..] {
// And then we try splitting by `:` (for compatibility with previous API)
args.mapdir.push((alias.to_string(), real_dir.to_string()));
} else {
eprintln!("Parse error in mapdir {} not parsed correctly", value);
}
}
"env" => {
if let [name, val] = value.split('=').collect::<Vec<&str>>()[..] {
args.env.push((name.to_string(), val.to_string()));
} else {
eprintln!("Parse error in env {} not parsed correctly", value);
}
}
"dir" => {
args.dir.push(value.to_string());
}
"arg" => {
args.args.push(value.to_string());
}
"tempdir" => {
args.tempdir.push(value.to_string());
}
"stdin" => {
assert!(args.stdin.is_none(), "Only the first `stdin` directive is used! Please correct this or update this code");
let s = value;
let s = s.strip_prefix('"').expect("expected leading '\"' in stdin");
let s = s
.trim_end()
.strip_suffix('\"')
.expect("expected trailing '\"' in stdin");
args.stdin = Some(s.to_string());
}
e => {
eprintln!("WARN: comment arg: `{}` is not supported", e);
}
}
}
return Some(args);
}
None
}
|
use super::util;
use super::wasi_version::*;
|
random_line_split
|
wasitests.rs
|
//! This file will run at build time to autogenerate the WASI regression tests
//! It will compile the files indicated in TESTS, to:executable and.wasm
//! - Compile with the native rust target to get the expected output
//! - Compile with the latest WASI target to get the wasm
//! - Generate the test that will compare the output of running the.wasm file
//! with wasmer with the expected output
use glob::glob;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::io;
use std::io::prelude::*;
use super::util;
use super::wasi_version::*;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct NativeOutput {
stdout: String,
stderr: String,
result: i64,
}
/// Compile and execute the test file as native code, saving the results to be
/// compared against later.
///
/// This function attempts to clean up its output after it executes it.
fn generate_native_output(
temp_dir: &Path,
file: &str,
normalized_name: &str,
args: &[String],
options: &WasiOptions,
) -> io::Result<NativeOutput> {
let executable_path = temp_dir.join(normalized_name);
println!(
"Compiling program {} to native at {}",
file,
executable_path.to_string_lossy()
);
let native_out = Command::new("rustc")
.arg(file)
.arg("-o")
.args(args)
.arg(&executable_path)
.output()
.expect("Failed to compile program to native code");
util::print_info_on_error(&native_out, "COMPILATION FAILED");
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let mut perm = executable_path
.metadata()
.expect("native executable")
.permissions();
perm.set_mode(0o766);
println!(
"Setting execute permissions on {}",
executable_path.to_string_lossy()
);
fs::set_permissions(&executable_path, perm)?;
}
println!(
"Executing native program at {}",
executable_path.to_string_lossy()
);
// workspace root
const EXECUTE_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi");
let mut native_command = Command::new(&executable_path)
.current_dir(EXECUTE_DIR)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap();
if let Some(stdin_str) = &options.stdin {
write!(native_command.stdin.as_ref().unwrap(), "{}", stdin_str).unwrap();
}
let result = native_command
.wait()
.expect("Failed to execute native program");
let stdout_str = {
let mut stdout = native_command.stdout.unwrap();
let mut s = String::new();
stdout.read_to_string(&mut s).unwrap();
s
};
let stderr_str = {
let mut stderr = native_command.stderr.unwrap();
let mut s = String::new();
stderr.read_to_string(&mut s).unwrap();
s
};
if!result.success() {
println!("NATIVE PROGRAM FAILED");
println!("stdout:\n{}", stdout_str);
eprintln!("stderr:\n{}", stderr_str);
}
let result = result.code().unwrap() as i64;
Ok(NativeOutput {
stdout: stdout_str,
stderr: stderr_str,
result,
})
}
/// compile the Wasm file for the given version of WASI
///
/// returns the path of where the wasm file is
fn compile_wasm_for_version(
temp_dir: &Path,
file: &str,
out_dir: &Path,
rs_mod_name: &str,
version: WasiVersion,
) -> io::Result<PathBuf> {
//let out_dir = base_dir; //base_dir.join("..").join(version.get_directory_name());
if!out_dir.exists() {
fs::create_dir(out_dir)?;
}
let wasm_out_name = {
let mut wasm_out_name = out_dir.join(rs_mod_name);
wasm_out_name.set_extension("wasm");
wasm_out_name
};
println!("Reading contents from file `{}`", file);
let file_contents: String = {
let mut fc = String::new();
let mut f = fs::OpenOptions::new().read(true).open(file)?;
f.read_to_string(&mut fc)?;
fc
};
let temp_wasi_rs_file_name = temp_dir.join(format!("wasi_modified_version_{}.rs", rs_mod_name));
{
let mut actual_file = fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(&temp_wasi_rs_file_name)
.unwrap();
actual_file.write_all(file_contents.as_bytes()).unwrap();
}
println!(
"Compiling wasm module `{}` with toolchain `{}`",
&wasm_out_name.to_string_lossy(),
version.get_compiler_toolchain()
);
let mut command = Command::new("rustc");
command
.arg(format!("+{}", version.get_compiler_toolchain()))
.arg("--target=wasm32-wasi")
.arg("-C")
.arg("opt-level=z")
.arg(&temp_wasi_rs_file_name)
.arg("-o")
.arg(&wasm_out_name);
println!("Command {:?}", command);
let wasm_compilation_out = command.output().expect("Failed to compile program to wasm");
util::print_info_on_error(&wasm_compilation_out, "WASM COMPILATION");
println!(
"Removing file `{}`",
&temp_wasi_rs_file_name.to_string_lossy()
);
// to prevent commiting huge binary blobs forever
let wasm_strip_out = Command::new("wasm-strip")
.arg(&wasm_out_name)
.output()
.expect("Failed to strip compiled wasm module");
util::print_info_on_error(&wasm_strip_out, "STRIPPING WASM");
let wasm_opt_out = Command::new("wasm-opt")
.arg("-Oz")
.arg(&wasm_out_name)
.arg("-o")
.arg(&wasm_out_name)
.output()
.expect("Failed to optimize compiled wasm module with wasm-opt!");
util::print_info_on_error(&wasm_opt_out, "OPTIMIZING WASM");
Ok(wasm_out_name)
}
/// Returns the a Vec of the test modules created
fn compile(temp_dir: &Path, file: &str, wasi_versions: &[WasiVersion]) {
let src_code: String = fs::read_to_string(file).unwrap();
let options: WasiOptions = extract_args_from_source_file(&src_code).unwrap_or_default();
assert!(file.ends_with(".rs"));
let rs_mod_name = {
Path::new(&file.to_lowercase())
.file_stem()
.unwrap()
.to_string_lossy()
.to_string()
};
let base_dir = Path::new(file).parent().unwrap();
let NativeOutput {
stdout,
stderr,
result,
} = generate_native_output(temp_dir, file, &rs_mod_name, &options.args, &options)
.expect("Generate native output");
let test = WasiTest {
wasm_prog_name: format!("{}.wasm", rs_mod_name),
stdout,
stderr,
result,
options,
};
let test_serialized = test.into_wasi_wast();
println!("Generated test output: {}", &test_serialized);
wasi_versions
.iter()
.map(|&version| {
let out_dir = base_dir.join("..").join(version.get_directory_name());
if!out_dir.exists() {
fs::create_dir(&out_dir).unwrap();
}
let wasm_out_name = {
let mut wasm_out_name = out_dir.join(rs_mod_name.clone());
wasm_out_name.set_extension("wast");
wasm_out_name
};
println!("Writing test output to {}", wasm_out_name.to_string_lossy());
fs::write(&wasm_out_name, test_serialized.clone()).unwrap();
println!("Compiling wasm version {:?}", version);
compile_wasm_for_version(temp_dir, file, &out_dir, &rs_mod_name, version)
.unwrap_or_else(|_| panic!("Could not compile Wasm to WASI version {:?}, perhaps you need to install the `{}` rust toolchain", version, version.get_compiler_toolchain()));
}).for_each(drop); // Do nothing with it, but let the iterator be consumed/iterated.
}
const WASI_TEST_SRC_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi/tests/*.rs");
pub fn build(wasi_versions: &[WasiVersion], specific_tests: &[&str]) {
let temp_dir = tempfile::TempDir::new().unwrap();
for entry in glob(WASI_TEST_SRC_DIR).unwrap() {
match entry {
Ok(path) => {
let test = path.to_str().unwrap();
if!specific_tests.is_empty() {
if let Some(filename) = path.file_stem().and_then(|f| f.to_str()) {
if specific_tests.contains(&filename) {
compile(temp_dir.path(), test, wasi_versions);
}
}
} else
|
}
Err(e) => println!("{:?}", e),
}
}
println!("All modules generated.");
}
/// This is the structure of the `.wast` file
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct WasiTest {
/// The name of the wasm module to run
pub wasm_prog_name: String,
/// The program expected output on stdout
pub stdout: String,
/// The program expected output on stderr
pub stderr: String,
/// The program expected result
pub result: i64,
/// The program options
pub options: WasiOptions,
}
impl WasiTest {
fn into_wasi_wast(self) -> String {
use std::fmt::Write;
let mut out = format!(
";; This file was generated by https://github.com/wasmerio/wasi-tests\n
(wasi_test \"{}\"",
self.wasm_prog_name
);
if!self.options.env.is_empty() {
let envs = self
.options
.env
.iter()
.map(|(name, value)| format!("\"{}={}\"", name, value))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (envs {})", envs);
}
if!self.options.args.is_empty() {
let args = self
.options
.args
.iter()
.map(|v| format!("\"{}\"", v))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (args {})", args);
}
if!self.options.dir.is_empty() {
let preopens = self
.options
.dir
.iter()
.map(|v| format!("\"{}\"", v))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (preopens {})", preopens);
}
if!self.options.mapdir.is_empty() {
let map_dirs = self
.options
.mapdir
.iter()
.map(|(a, b)| format!("\"{}:{}\"", a, b))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (map_dirs {})", map_dirs);
}
if!self.options.tempdir.is_empty() {
let temp_dirs = self
.options
.tempdir
.iter()
.map(|td| format!("\"{}\"", td))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (temp_dirs {})", temp_dirs);
}
let _ = write!(out, "\n (assert_return (i64.const {}))", self.result);
if let Some(stdin) = &self.options.stdin {
let _ = write!(out, "\n (stdin {:?})", stdin);
}
if!self.stdout.is_empty() {
let _ = write!(out, "\n (assert_stdout {:?})", self.stdout);
}
if!self.stderr.is_empty() {
let _ = write!(out, "\n (assert_stderr {:?})", self.stderr);
}
let _ = write!(out, "\n)\n");
out
}
}
/// The options provied when executed a WASI Wasm program
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct WasiOptions {
/// Mapped pre-opened dirs
pub mapdir: Vec<(String, String)>,
/// Environment vars
pub env: Vec<(String, String)>,
/// Program arguments
pub args: Vec<String>,
/// Pre-opened directories
pub dir: Vec<String>,
/// The alias of the temporary directory to use
pub tempdir: Vec<String>,
/// Stdin to give to the native program and WASI program.
pub stdin: Option<String>,
}
/// Pulls args to the program out of a comment at the top of the file starting with "// WasiOptions:"
fn extract_args_from_source_file(source_code: &str) -> Option<WasiOptions> {
if source_code.starts_with("// WASI:") {
let mut args = WasiOptions::default();
for arg_line in source_code
.lines()
.skip(1)
.take_while(|line| line.starts_with("// "))
{
let arg_line = arg_line.strip_prefix("// ").unwrap();
let arg_line = arg_line.trim();
let colon_idx = arg_line
.find(':')
.expect("directives provided at the top must be separated by a `:`");
let (command_name, value) = arg_line.split_at(colon_idx);
let value = value.strip_prefix(':').unwrap();
let value = value.trim();
match command_name {
"mapdir" =>
// We try first splitting by `::`
{
if let [alias, real_dir] = value.split("::").collect::<Vec<&str>>()[..] {
args.mapdir.push((alias.to_string(), real_dir.to_string()));
} else if let [alias, real_dir] = value.split(':').collect::<Vec<&str>>()[..] {
// And then we try splitting by `:` (for compatibility with previous API)
args.mapdir.push((alias.to_string(), real_dir.to_string()));
} else {
eprintln!("Parse error in mapdir {} not parsed correctly", value);
}
}
"env" => {
if let [name, val] = value.split('=').collect::<Vec<&str>>()[..] {
args.env.push((name.to_string(), val.to_string()));
} else {
eprintln!("Parse error in env {} not parsed correctly", value);
}
}
"dir" => {
args.dir.push(value.to_string());
}
"arg" => {
args.args.push(value.to_string());
}
"tempdir" => {
args.tempdir.push(value.to_string());
}
"stdin" => {
assert!(args.stdin.is_none(), "Only the first `stdin` directive is used! Please correct this or update this code");
let s = value;
let s = s.strip_prefix('"').expect("expected leading '\"' in stdin");
let s = s
.trim_end()
.strip_suffix('\"')
.expect("expected trailing '\"' in stdin");
args.stdin = Some(s.to_string());
}
e => {
eprintln!("WARN: comment arg: `{}` is not supported", e);
}
}
}
return Some(args);
}
None
}
|
{
compile(temp_dir.path(), test, wasi_versions);
}
|
conditional_block
|
wasitests.rs
|
//! This file will run at build time to autogenerate the WASI regression tests
//! It will compile the files indicated in TESTS, to:executable and.wasm
//! - Compile with the native rust target to get the expected output
//! - Compile with the latest WASI target to get the wasm
//! - Generate the test that will compare the output of running the.wasm file
//! with wasmer with the expected output
use glob::glob;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::io;
use std::io::prelude::*;
use super::util;
use super::wasi_version::*;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct NativeOutput {
stdout: String,
stderr: String,
result: i64,
}
/// Compile and execute the test file as native code, saving the results to be
/// compared against later.
///
/// This function attempts to clean up its output after it executes it.
fn generate_native_output(
temp_dir: &Path,
file: &str,
normalized_name: &str,
args: &[String],
options: &WasiOptions,
) -> io::Result<NativeOutput> {
let executable_path = temp_dir.join(normalized_name);
println!(
"Compiling program {} to native at {}",
file,
executable_path.to_string_lossy()
);
let native_out = Command::new("rustc")
.arg(file)
.arg("-o")
.args(args)
.arg(&executable_path)
.output()
.expect("Failed to compile program to native code");
util::print_info_on_error(&native_out, "COMPILATION FAILED");
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let mut perm = executable_path
.metadata()
.expect("native executable")
.permissions();
perm.set_mode(0o766);
println!(
"Setting execute permissions on {}",
executable_path.to_string_lossy()
);
fs::set_permissions(&executable_path, perm)?;
}
println!(
"Executing native program at {}",
executable_path.to_string_lossy()
);
// workspace root
const EXECUTE_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi");
let mut native_command = Command::new(&executable_path)
.current_dir(EXECUTE_DIR)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap();
if let Some(stdin_str) = &options.stdin {
write!(native_command.stdin.as_ref().unwrap(), "{}", stdin_str).unwrap();
}
let result = native_command
.wait()
.expect("Failed to execute native program");
let stdout_str = {
let mut stdout = native_command.stdout.unwrap();
let mut s = String::new();
stdout.read_to_string(&mut s).unwrap();
s
};
let stderr_str = {
let mut stderr = native_command.stderr.unwrap();
let mut s = String::new();
stderr.read_to_string(&mut s).unwrap();
s
};
if!result.success() {
println!("NATIVE PROGRAM FAILED");
println!("stdout:\n{}", stdout_str);
eprintln!("stderr:\n{}", stderr_str);
}
let result = result.code().unwrap() as i64;
Ok(NativeOutput {
stdout: stdout_str,
stderr: stderr_str,
result,
})
}
/// compile the Wasm file for the given version of WASI
///
/// returns the path of where the wasm file is
fn compile_wasm_for_version(
temp_dir: &Path,
file: &str,
out_dir: &Path,
rs_mod_name: &str,
version: WasiVersion,
) -> io::Result<PathBuf> {
//let out_dir = base_dir; //base_dir.join("..").join(version.get_directory_name());
if!out_dir.exists() {
fs::create_dir(out_dir)?;
}
let wasm_out_name = {
let mut wasm_out_name = out_dir.join(rs_mod_name);
wasm_out_name.set_extension("wasm");
wasm_out_name
};
println!("Reading contents from file `{}`", file);
let file_contents: String = {
let mut fc = String::new();
let mut f = fs::OpenOptions::new().read(true).open(file)?;
f.read_to_string(&mut fc)?;
fc
};
let temp_wasi_rs_file_name = temp_dir.join(format!("wasi_modified_version_{}.rs", rs_mod_name));
{
let mut actual_file = fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(&temp_wasi_rs_file_name)
.unwrap();
actual_file.write_all(file_contents.as_bytes()).unwrap();
}
println!(
"Compiling wasm module `{}` with toolchain `{}`",
&wasm_out_name.to_string_lossy(),
version.get_compiler_toolchain()
);
let mut command = Command::new("rustc");
command
.arg(format!("+{}", version.get_compiler_toolchain()))
.arg("--target=wasm32-wasi")
.arg("-C")
.arg("opt-level=z")
.arg(&temp_wasi_rs_file_name)
.arg("-o")
.arg(&wasm_out_name);
println!("Command {:?}", command);
let wasm_compilation_out = command.output().expect("Failed to compile program to wasm");
util::print_info_on_error(&wasm_compilation_out, "WASM COMPILATION");
println!(
"Removing file `{}`",
&temp_wasi_rs_file_name.to_string_lossy()
);
// to prevent commiting huge binary blobs forever
let wasm_strip_out = Command::new("wasm-strip")
.arg(&wasm_out_name)
.output()
.expect("Failed to strip compiled wasm module");
util::print_info_on_error(&wasm_strip_out, "STRIPPING WASM");
let wasm_opt_out = Command::new("wasm-opt")
.arg("-Oz")
.arg(&wasm_out_name)
.arg("-o")
.arg(&wasm_out_name)
.output()
.expect("Failed to optimize compiled wasm module with wasm-opt!");
util::print_info_on_error(&wasm_opt_out, "OPTIMIZING WASM");
Ok(wasm_out_name)
}
/// Returns the a Vec of the test modules created
fn
|
(temp_dir: &Path, file: &str, wasi_versions: &[WasiVersion]) {
let src_code: String = fs::read_to_string(file).unwrap();
let options: WasiOptions = extract_args_from_source_file(&src_code).unwrap_or_default();
assert!(file.ends_with(".rs"));
let rs_mod_name = {
Path::new(&file.to_lowercase())
.file_stem()
.unwrap()
.to_string_lossy()
.to_string()
};
let base_dir = Path::new(file).parent().unwrap();
let NativeOutput {
stdout,
stderr,
result,
} = generate_native_output(temp_dir, file, &rs_mod_name, &options.args, &options)
.expect("Generate native output");
let test = WasiTest {
wasm_prog_name: format!("{}.wasm", rs_mod_name),
stdout,
stderr,
result,
options,
};
let test_serialized = test.into_wasi_wast();
println!("Generated test output: {}", &test_serialized);
wasi_versions
.iter()
.map(|&version| {
let out_dir = base_dir.join("..").join(version.get_directory_name());
if!out_dir.exists() {
fs::create_dir(&out_dir).unwrap();
}
let wasm_out_name = {
let mut wasm_out_name = out_dir.join(rs_mod_name.clone());
wasm_out_name.set_extension("wast");
wasm_out_name
};
println!("Writing test output to {}", wasm_out_name.to_string_lossy());
fs::write(&wasm_out_name, test_serialized.clone()).unwrap();
println!("Compiling wasm version {:?}", version);
compile_wasm_for_version(temp_dir, file, &out_dir, &rs_mod_name, version)
.unwrap_or_else(|_| panic!("Could not compile Wasm to WASI version {:?}, perhaps you need to install the `{}` rust toolchain", version, version.get_compiler_toolchain()));
}).for_each(drop); // Do nothing with it, but let the iterator be consumed/iterated.
}
const WASI_TEST_SRC_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi/tests/*.rs");
pub fn build(wasi_versions: &[WasiVersion], specific_tests: &[&str]) {
let temp_dir = tempfile::TempDir::new().unwrap();
for entry in glob(WASI_TEST_SRC_DIR).unwrap() {
match entry {
Ok(path) => {
let test = path.to_str().unwrap();
if!specific_tests.is_empty() {
if let Some(filename) = path.file_stem().and_then(|f| f.to_str()) {
if specific_tests.contains(&filename) {
compile(temp_dir.path(), test, wasi_versions);
}
}
} else {
compile(temp_dir.path(), test, wasi_versions);
}
}
Err(e) => println!("{:?}", e),
}
}
println!("All modules generated.");
}
/// This is the structure of the `.wast` file
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct WasiTest {
/// The name of the wasm module to run
pub wasm_prog_name: String,
/// The program expected output on stdout
pub stdout: String,
/// The program expected output on stderr
pub stderr: String,
/// The program expected result
pub result: i64,
/// The program options
pub options: WasiOptions,
}
impl WasiTest {
fn into_wasi_wast(self) -> String {
use std::fmt::Write;
let mut out = format!(
";; This file was generated by https://github.com/wasmerio/wasi-tests\n
(wasi_test \"{}\"",
self.wasm_prog_name
);
if!self.options.env.is_empty() {
let envs = self
.options
.env
.iter()
.map(|(name, value)| format!("\"{}={}\"", name, value))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (envs {})", envs);
}
if!self.options.args.is_empty() {
let args = self
.options
.args
.iter()
.map(|v| format!("\"{}\"", v))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (args {})", args);
}
if!self.options.dir.is_empty() {
let preopens = self
.options
.dir
.iter()
.map(|v| format!("\"{}\"", v))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (preopens {})", preopens);
}
if!self.options.mapdir.is_empty() {
let map_dirs = self
.options
.mapdir
.iter()
.map(|(a, b)| format!("\"{}:{}\"", a, b))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (map_dirs {})", map_dirs);
}
if!self.options.tempdir.is_empty() {
let temp_dirs = self
.options
.tempdir
.iter()
.map(|td| format!("\"{}\"", td))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (temp_dirs {})", temp_dirs);
}
let _ = write!(out, "\n (assert_return (i64.const {}))", self.result);
if let Some(stdin) = &self.options.stdin {
let _ = write!(out, "\n (stdin {:?})", stdin);
}
if!self.stdout.is_empty() {
let _ = write!(out, "\n (assert_stdout {:?})", self.stdout);
}
if!self.stderr.is_empty() {
let _ = write!(out, "\n (assert_stderr {:?})", self.stderr);
}
let _ = write!(out, "\n)\n");
out
}
}
/// The options provied when executed a WASI Wasm program
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct WasiOptions {
/// Mapped pre-opened dirs
pub mapdir: Vec<(String, String)>,
/// Environment vars
pub env: Vec<(String, String)>,
/// Program arguments
pub args: Vec<String>,
/// Pre-opened directories
pub dir: Vec<String>,
/// The alias of the temporary directory to use
pub tempdir: Vec<String>,
/// Stdin to give to the native program and WASI program.
pub stdin: Option<String>,
}
/// Pulls args to the program out of a comment at the top of the file starting with "// WasiOptions:"
fn extract_args_from_source_file(source_code: &str) -> Option<WasiOptions> {
if source_code.starts_with("// WASI:") {
let mut args = WasiOptions::default();
for arg_line in source_code
.lines()
.skip(1)
.take_while(|line| line.starts_with("// "))
{
let arg_line = arg_line.strip_prefix("// ").unwrap();
let arg_line = arg_line.trim();
let colon_idx = arg_line
.find(':')
.expect("directives provided at the top must be separated by a `:`");
let (command_name, value) = arg_line.split_at(colon_idx);
let value = value.strip_prefix(':').unwrap();
let value = value.trim();
match command_name {
"mapdir" =>
// We try first splitting by `::`
{
if let [alias, real_dir] = value.split("::").collect::<Vec<&str>>()[..] {
args.mapdir.push((alias.to_string(), real_dir.to_string()));
} else if let [alias, real_dir] = value.split(':').collect::<Vec<&str>>()[..] {
// And then we try splitting by `:` (for compatibility with previous API)
args.mapdir.push((alias.to_string(), real_dir.to_string()));
} else {
eprintln!("Parse error in mapdir {} not parsed correctly", value);
}
}
"env" => {
if let [name, val] = value.split('=').collect::<Vec<&str>>()[..] {
args.env.push((name.to_string(), val.to_string()));
} else {
eprintln!("Parse error in env {} not parsed correctly", value);
}
}
"dir" => {
args.dir.push(value.to_string());
}
"arg" => {
args.args.push(value.to_string());
}
"tempdir" => {
args.tempdir.push(value.to_string());
}
"stdin" => {
assert!(args.stdin.is_none(), "Only the first `stdin` directive is used! Please correct this or update this code");
let s = value;
let s = s.strip_prefix('"').expect("expected leading '\"' in stdin");
let s = s
.trim_end()
.strip_suffix('\"')
.expect("expected trailing '\"' in stdin");
args.stdin = Some(s.to_string());
}
e => {
eprintln!("WARN: comment arg: `{}` is not supported", e);
}
}
}
return Some(args);
}
None
}
|
compile
|
identifier_name
|
wasitests.rs
|
//! This file will run at build time to autogenerate the WASI regression tests
//! It will compile the files indicated in TESTS, to:executable and.wasm
//! - Compile with the native rust target to get the expected output
//! - Compile with the latest WASI target to get the wasm
//! - Generate the test that will compare the output of running the.wasm file
//! with wasmer with the expected output
use glob::glob;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::io;
use std::io::prelude::*;
use super::util;
use super::wasi_version::*;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct NativeOutput {
stdout: String,
stderr: String,
result: i64,
}
/// Compile and execute the test file as native code, saving the results to be
/// compared against later.
///
/// This function attempts to clean up its output after it executes it.
fn generate_native_output(
temp_dir: &Path,
file: &str,
normalized_name: &str,
args: &[String],
options: &WasiOptions,
) -> io::Result<NativeOutput> {
let executable_path = temp_dir.join(normalized_name);
println!(
"Compiling program {} to native at {}",
file,
executable_path.to_string_lossy()
);
let native_out = Command::new("rustc")
.arg(file)
.arg("-o")
.args(args)
.arg(&executable_path)
.output()
.expect("Failed to compile program to native code");
util::print_info_on_error(&native_out, "COMPILATION FAILED");
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let mut perm = executable_path
.metadata()
.expect("native executable")
.permissions();
perm.set_mode(0o766);
println!(
"Setting execute permissions on {}",
executable_path.to_string_lossy()
);
fs::set_permissions(&executable_path, perm)?;
}
println!(
"Executing native program at {}",
executable_path.to_string_lossy()
);
// workspace root
const EXECUTE_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi");
let mut native_command = Command::new(&executable_path)
.current_dir(EXECUTE_DIR)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap();
if let Some(stdin_str) = &options.stdin {
write!(native_command.stdin.as_ref().unwrap(), "{}", stdin_str).unwrap();
}
let result = native_command
.wait()
.expect("Failed to execute native program");
let stdout_str = {
let mut stdout = native_command.stdout.unwrap();
let mut s = String::new();
stdout.read_to_string(&mut s).unwrap();
s
};
let stderr_str = {
let mut stderr = native_command.stderr.unwrap();
let mut s = String::new();
stderr.read_to_string(&mut s).unwrap();
s
};
if!result.success() {
println!("NATIVE PROGRAM FAILED");
println!("stdout:\n{}", stdout_str);
eprintln!("stderr:\n{}", stderr_str);
}
let result = result.code().unwrap() as i64;
Ok(NativeOutput {
stdout: stdout_str,
stderr: stderr_str,
result,
})
}
/// compile the Wasm file for the given version of WASI
///
/// returns the path of where the wasm file is
fn compile_wasm_for_version(
temp_dir: &Path,
file: &str,
out_dir: &Path,
rs_mod_name: &str,
version: WasiVersion,
) -> io::Result<PathBuf> {
//let out_dir = base_dir; //base_dir.join("..").join(version.get_directory_name());
if!out_dir.exists() {
fs::create_dir(out_dir)?;
}
let wasm_out_name = {
let mut wasm_out_name = out_dir.join(rs_mod_name);
wasm_out_name.set_extension("wasm");
wasm_out_name
};
println!("Reading contents from file `{}`", file);
let file_contents: String = {
let mut fc = String::new();
let mut f = fs::OpenOptions::new().read(true).open(file)?;
f.read_to_string(&mut fc)?;
fc
};
let temp_wasi_rs_file_name = temp_dir.join(format!("wasi_modified_version_{}.rs", rs_mod_name));
{
let mut actual_file = fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(&temp_wasi_rs_file_name)
.unwrap();
actual_file.write_all(file_contents.as_bytes()).unwrap();
}
println!(
"Compiling wasm module `{}` with toolchain `{}`",
&wasm_out_name.to_string_lossy(),
version.get_compiler_toolchain()
);
let mut command = Command::new("rustc");
command
.arg(format!("+{}", version.get_compiler_toolchain()))
.arg("--target=wasm32-wasi")
.arg("-C")
.arg("opt-level=z")
.arg(&temp_wasi_rs_file_name)
.arg("-o")
.arg(&wasm_out_name);
println!("Command {:?}", command);
let wasm_compilation_out = command.output().expect("Failed to compile program to wasm");
util::print_info_on_error(&wasm_compilation_out, "WASM COMPILATION");
println!(
"Removing file `{}`",
&temp_wasi_rs_file_name.to_string_lossy()
);
// to prevent commiting huge binary blobs forever
let wasm_strip_out = Command::new("wasm-strip")
.arg(&wasm_out_name)
.output()
.expect("Failed to strip compiled wasm module");
util::print_info_on_error(&wasm_strip_out, "STRIPPING WASM");
let wasm_opt_out = Command::new("wasm-opt")
.arg("-Oz")
.arg(&wasm_out_name)
.arg("-o")
.arg(&wasm_out_name)
.output()
.expect("Failed to optimize compiled wasm module with wasm-opt!");
util::print_info_on_error(&wasm_opt_out, "OPTIMIZING WASM");
Ok(wasm_out_name)
}
/// Returns the a Vec of the test modules created
fn compile(temp_dir: &Path, file: &str, wasi_versions: &[WasiVersion]) {
let src_code: String = fs::read_to_string(file).unwrap();
let options: WasiOptions = extract_args_from_source_file(&src_code).unwrap_or_default();
assert!(file.ends_with(".rs"));
let rs_mod_name = {
Path::new(&file.to_lowercase())
.file_stem()
.unwrap()
.to_string_lossy()
.to_string()
};
let base_dir = Path::new(file).parent().unwrap();
let NativeOutput {
stdout,
stderr,
result,
} = generate_native_output(temp_dir, file, &rs_mod_name, &options.args, &options)
.expect("Generate native output");
let test = WasiTest {
wasm_prog_name: format!("{}.wasm", rs_mod_name),
stdout,
stderr,
result,
options,
};
let test_serialized = test.into_wasi_wast();
println!("Generated test output: {}", &test_serialized);
wasi_versions
.iter()
.map(|&version| {
let out_dir = base_dir.join("..").join(version.get_directory_name());
if!out_dir.exists() {
fs::create_dir(&out_dir).unwrap();
}
let wasm_out_name = {
let mut wasm_out_name = out_dir.join(rs_mod_name.clone());
wasm_out_name.set_extension("wast");
wasm_out_name
};
println!("Writing test output to {}", wasm_out_name.to_string_lossy());
fs::write(&wasm_out_name, test_serialized.clone()).unwrap();
println!("Compiling wasm version {:?}", version);
compile_wasm_for_version(temp_dir, file, &out_dir, &rs_mod_name, version)
.unwrap_or_else(|_| panic!("Could not compile Wasm to WASI version {:?}, perhaps you need to install the `{}` rust toolchain", version, version.get_compiler_toolchain()));
}).for_each(drop); // Do nothing with it, but let the iterator be consumed/iterated.
}
const WASI_TEST_SRC_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi/tests/*.rs");
pub fn build(wasi_versions: &[WasiVersion], specific_tests: &[&str]) {
let temp_dir = tempfile::TempDir::new().unwrap();
for entry in glob(WASI_TEST_SRC_DIR).unwrap() {
match entry {
Ok(path) => {
let test = path.to_str().unwrap();
if!specific_tests.is_empty() {
if let Some(filename) = path.file_stem().and_then(|f| f.to_str()) {
if specific_tests.contains(&filename) {
compile(temp_dir.path(), test, wasi_versions);
}
}
} else {
compile(temp_dir.path(), test, wasi_versions);
}
}
Err(e) => println!("{:?}", e),
}
}
println!("All modules generated.");
}
/// This is the structure of the `.wast` file
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct WasiTest {
/// The name of the wasm module to run
pub wasm_prog_name: String,
/// The program expected output on stdout
pub stdout: String,
/// The program expected output on stderr
pub stderr: String,
/// The program expected result
pub result: i64,
/// The program options
pub options: WasiOptions,
}
impl WasiTest {
fn into_wasi_wast(self) -> String
|
.options
.args
.iter()
.map(|v| format!("\"{}\"", v))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (args {})", args);
}
if!self.options.dir.is_empty() {
let preopens = self
.options
.dir
.iter()
.map(|v| format!("\"{}\"", v))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (preopens {})", preopens);
}
if!self.options.mapdir.is_empty() {
let map_dirs = self
.options
.mapdir
.iter()
.map(|(a, b)| format!("\"{}:{}\"", a, b))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (map_dirs {})", map_dirs);
}
if!self.options.tempdir.is_empty() {
let temp_dirs = self
.options
.tempdir
.iter()
.map(|td| format!("\"{}\"", td))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (temp_dirs {})", temp_dirs);
}
let _ = write!(out, "\n (assert_return (i64.const {}))", self.result);
if let Some(stdin) = &self.options.stdin {
let _ = write!(out, "\n (stdin {:?})", stdin);
}
if!self.stdout.is_empty() {
let _ = write!(out, "\n (assert_stdout {:?})", self.stdout);
}
if!self.stderr.is_empty() {
let _ = write!(out, "\n (assert_stderr {:?})", self.stderr);
}
let _ = write!(out, "\n)\n");
out
}
}
/// The options provied when executed a WASI Wasm program
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct WasiOptions {
/// Mapped pre-opened dirs
pub mapdir: Vec<(String, String)>,
/// Environment vars
pub env: Vec<(String, String)>,
/// Program arguments
pub args: Vec<String>,
/// Pre-opened directories
pub dir: Vec<String>,
/// The alias of the temporary directory to use
pub tempdir: Vec<String>,
/// Stdin to give to the native program and WASI program.
pub stdin: Option<String>,
}
/// Pulls args to the program out of a comment at the top of the file starting with "// WasiOptions:"
fn extract_args_from_source_file(source_code: &str) -> Option<WasiOptions> {
if source_code.starts_with("// WASI:") {
let mut args = WasiOptions::default();
for arg_line in source_code
.lines()
.skip(1)
.take_while(|line| line.starts_with("// "))
{
let arg_line = arg_line.strip_prefix("// ").unwrap();
let arg_line = arg_line.trim();
let colon_idx = arg_line
.find(':')
.expect("directives provided at the top must be separated by a `:`");
let (command_name, value) = arg_line.split_at(colon_idx);
let value = value.strip_prefix(':').unwrap();
let value = value.trim();
match command_name {
"mapdir" =>
// We try first splitting by `::`
{
if let [alias, real_dir] = value.split("::").collect::<Vec<&str>>()[..] {
args.mapdir.push((alias.to_string(), real_dir.to_string()));
} else if let [alias, real_dir] = value.split(':').collect::<Vec<&str>>()[..] {
// And then we try splitting by `:` (for compatibility with previous API)
args.mapdir.push((alias.to_string(), real_dir.to_string()));
} else {
eprintln!("Parse error in mapdir {} not parsed correctly", value);
}
}
"env" => {
if let [name, val] = value.split('=').collect::<Vec<&str>>()[..] {
args.env.push((name.to_string(), val.to_string()));
} else {
eprintln!("Parse error in env {} not parsed correctly", value);
}
}
"dir" => {
args.dir.push(value.to_string());
}
"arg" => {
args.args.push(value.to_string());
}
"tempdir" => {
args.tempdir.push(value.to_string());
}
"stdin" => {
assert!(args.stdin.is_none(), "Only the first `stdin` directive is used! Please correct this or update this code");
let s = value;
let s = s.strip_prefix('"').expect("expected leading '\"' in stdin");
let s = s
.trim_end()
.strip_suffix('\"')
.expect("expected trailing '\"' in stdin");
args.stdin = Some(s.to_string());
}
e => {
eprintln!("WARN: comment arg: `{}` is not supported", e);
}
}
}
return Some(args);
}
None
}
|
{
use std::fmt::Write;
let mut out = format!(
";; This file was generated by https://github.com/wasmerio/wasi-tests\n
(wasi_test \"{}\"",
self.wasm_prog_name
);
if !self.options.env.is_empty() {
let envs = self
.options
.env
.iter()
.map(|(name, value)| format!("\"{}={}\"", name, value))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (envs {})", envs);
}
if !self.options.args.is_empty() {
let args = self
|
identifier_body
|
test.rs
|
use super::{cmd, Expression};
use std;
use std::collections::HashMap;
use std::env;
use std::env::consts::EXE_EXTENSION;
use std::ffi::OsString;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::Command;
use std::str;
use std::sync::{Arc, Once};
// Include a copy of the sh function, because we have a lot of old tests that
// use it, and it's a lot easier than managing a circular dependency between
// duct and duct_sh.
pub fn sh(command: &'static str) -> Expression {
let argv = shell_command_argv(command.into());
cmd(&argv[0], &argv[1..])
}
#[cfg(unix)]
fn shell_command_argv(command: OsString) -> Vec<OsString> {
vec!["/bin/sh".into(), "-c".into(), command]
}
#[cfg(windows)]
fn shell_command_argv(command: OsString) -> Vec<OsString> {
let comspec = std::env::var_os("COMSPEC").unwrap_or_else(|| "cmd.exe".into());
vec![comspec, "/C".into(), command]
}
pub fn path_to_exe(name: &str) -> PathBuf {
// This project defines some associated binaries for testing, and we shell out to them in
// these tests. `cargo test` doesn't automatically build associated binaries, so this
// function takes care of building them explicitly.
static CARGO_BUILD_ONCE: Once = Once::new();
CARGO_BUILD_ONCE.call_once(|| {
let build_status = Command::new("cargo")
.arg("build")
.arg("--quiet")
.status()
.unwrap();
assert!(
build_status.success(),
"Cargo failed to build associated binaries."
);
});
Path::new("target")
.join("debug")
.join(name)
.with_extension(EXE_EXTENSION)
}
pub fn true_cmd() -> Expression {
cmd!(path_to_exe("status"), "0")
}
fn false_cmd() -> Expression {
cmd!(path_to_exe("status"), "1")
}
#[test]
fn test_cmd() {
let output = cmd!(path_to_exe("echo"), "hi").read().unwrap();
assert_eq!("hi", output);
}
#[test]
fn
|
() {
// Windows compatible.
let output = sh("echo hi").read().unwrap();
assert_eq!("hi", output);
}
#[test]
fn test_start() {
let handle1 = cmd!(path_to_exe("echo"), "hi")
.stdout_capture()
.start()
.unwrap();
let handle2 = cmd!(path_to_exe("echo"), "lo")
.stdout_capture()
.start()
.unwrap();
let output1 = handle1.wait().unwrap();
let output2 = handle2.wait().unwrap();
assert_eq!("hi", str::from_utf8(&output1.stdout).unwrap().trim());
assert_eq!("lo", str::from_utf8(&output2.stdout).unwrap().trim());
}
#[test]
fn test_error() {
let result = false_cmd().run();
if let Err(err) = result {
assert_eq!(err.kind(), io::ErrorKind::Other);
} else {
panic!("Expected a status error.");
}
}
#[test]
fn test_unchecked() {
let unchecked_false = false_cmd().unchecked();
// Unchecked errors shouldn't cause `run` to return an error.
let output = unchecked_false
.pipe(cmd!(path_to_exe("echo"), "waa"))
.stdout_capture()
.run()
.unwrap();
// The value of the exit code is preserved.
assert_eq!(1, output.status.code().unwrap());
assert_eq!("waa", String::from_utf8_lossy(&output.stdout).trim());
}
#[test]
fn test_unchecked_in_pipe() {
let zero = cmd!(path_to_exe("status"), "0");
let one = cmd!(path_to_exe("status"), "1");
let two = cmd!(path_to_exe("status"), "2");
// Right takes precedence over left.
let output = one.pipe(two.clone()).unchecked().run().unwrap();
assert_eq!(2, output.status.code().unwrap());
// Except that checked on the left takes precedence over unchecked on
// the right.
let output = one.pipe(two.unchecked()).unchecked().run().unwrap();
assert_eq!(1, output.status.code().unwrap());
// Right takes precedence over the left again if they're both unchecked.
let output = one
.unchecked()
.pipe(two.unchecked())
.unchecked()
.run()
.unwrap();
assert_eq!(2, output.status.code().unwrap());
// Except that if the right is a success, the left takes precedence.
let output = one
.unchecked()
.pipe(zero.unchecked())
.unchecked()
.run()
.unwrap();
assert_eq!(1, output.status.code().unwrap());
// Even if the right is checked.
let output = one.unchecked().pipe(zero).unchecked().run().unwrap();
assert_eq!(1, output.status.code().unwrap());
}
#[test]
fn test_pipe() {
let output = sh("echo xxx")
.pipe(cmd!(path_to_exe("x_to_y")))
.read()
.unwrap();
assert_eq!("yyy", output);
// Check that errors on either side are propagated.
let result = true_cmd().pipe(false_cmd()).run();
assert!(result.is_err());
let result = false_cmd().pipe(true_cmd()).run();
assert!(result.is_err());
}
#[test]
fn test_pipe_with_kill() {
// Make sure both sides get killed.
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Note that we don't use unchecked() here. This tests that kill suppresses
// exit status errors.
let handle = sleep_cmd.pipe(sleep_cmd.clone()).start().unwrap();
handle.kill().unwrap();
// But calling wait again should be an error, because of the status.
handle.wait().unwrap_err();
}
#[test]
fn test_pipe_start() {
let nonexistent_cmd = cmd!(path_to_exe("nonexistent!!!"));
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Errors starting the left side of a pipe are returned immediately, and
// the right side is never started.
nonexistent_cmd.pipe(&sleep_cmd).start().unwrap_err();
// Errors starting the right side are also returned immediately, and the
// the left side is killed first.
sleep_cmd.pipe(nonexistent_cmd).start().unwrap_err();
}
#[test]
fn test_multiple_threads() {
// Wait on the sleep command in a background thread, while the main thread
// kills it.
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
let handle = Arc::new(sleep_cmd.unchecked().start().unwrap());
let arc_clone = handle.clone();
let wait_thread = std::thread::spawn(move || {
arc_clone.wait().unwrap();
});
handle.kill().unwrap();
wait_thread.join().unwrap();
}
#[test]
fn test_nonblocking_waits() {
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Make sure pipelines handle try_wait correctly.
let handle = sleep_cmd.pipe(&sleep_cmd).unchecked().start().unwrap();
// Make sure try_wait doesn't block on it.
assert!(handle.try_wait().unwrap().is_none());
handle.kill().unwrap();
}
#[test]
fn test_input() {
let expr = cmd!(path_to_exe("x_to_y")).stdin_bytes("xxx");
let output = expr.read().unwrap();
assert_eq!("yyy", output);
}
#[test]
fn test_stderr() {
let (mut reader, writer) = ::os_pipe::pipe().unwrap();
sh("echo hi>&2").stderr_file(writer).run().unwrap();
let mut s = String::new();
reader.read_to_string(&mut s).unwrap();
assert_eq!(s.trim(), "hi");
}
#[test]
fn test_null() {
let expr = cmd!(path_to_exe("cat"))
.stdin_null()
.stdout_null()
.stderr_null();
let output = expr.read().unwrap();
assert_eq!("", output);
}
#[test]
fn test_path() {
let dir = tempfile::tempdir().unwrap();
let input_file = dir.path().join("input_file");
let output_file = dir.path().join("output_file");
File::create(&input_file)
.unwrap()
.write_all(b"xxx")
.unwrap();
let expr = cmd!(path_to_exe("x_to_y"))
.stdin_path(&input_file)
.stdout_path(&output_file);
let output = expr.read().unwrap();
assert_eq!("", output);
let mut file_output = String::new();
File::open(&output_file)
.unwrap()
.read_to_string(&mut file_output)
.unwrap();
assert_eq!("yyy", file_output);
}
#[test]
fn test_swapping() {
let output = sh("echo hi")
.stdout_to_stderr()
.stderr_capture()
.run()
.unwrap();
let stderr = str::from_utf8(&output.stderr).unwrap().trim();
assert_eq!("hi", stderr);
// Windows compatible. (Requires no space before the ">".)
let output = sh("echo hi>&2").stderr_to_stdout().read().unwrap();
assert_eq!("hi", output);
}
#[test]
fn test_file() {
let dir = tempfile::tempdir().unwrap();
let file = dir.path().join("file");
File::create(&file).unwrap().write_all(b"example").unwrap();
let expr = cmd!(path_to_exe("cat")).stdin_file(File::open(&file).unwrap());
let output = expr.read().unwrap();
assert_eq!(output, "example");
}
#[test]
fn test_ergonomics() {
let mystr = "owned string".to_owned();
let mypathbuf = Path::new("a/b/c").to_owned();
let myvec = vec![1, 2, 3];
// These are nonsense expressions. We just want to make sure they compile.
let _ = sh("true")
.stdin_path(&*mystr)
.stdin_bytes(&*myvec)
.stdout_path(&*mypathbuf);
let _ = sh("true")
.stdin_path(mystr)
.stdin_bytes(myvec)
.stdout_path(mypathbuf);
// Unfortunately, this one doesn't work with our Into<Vec<u8>> bound on input().
// TODO: Is it worth having these impls for &Vec in other cases?
// let _ = sh("true").stdin_path(&mystr).stdin_bytes(&myvec).stdout_path(&mypathbuf);
}
#[test]
fn test_capture_both() {
// Windows compatible, no space before ">", and we trim newlines at the end to avoid
// dealing with the different kinds.
let output = sh("echo hi && echo lo>&2")
.stdout_capture()
.stderr_capture()
.run()
.unwrap();
assert_eq!("hi", str::from_utf8(&output.stdout).unwrap().trim());
assert_eq!("lo", str::from_utf8(&output.stderr).unwrap().trim());
}
#[test]
fn test_dir() {
// This test checks the interaction of `dir` and relative exe paths.
// Make sure that's actually what we're testing.
let pwd_path = path_to_exe("pwd");
assert!(pwd_path.is_relative());
let pwd = cmd!(pwd_path);
// First assert that ordinary commands happen in the parent's dir.
let pwd_output = pwd.read().unwrap();
let pwd_path = Path::new(&pwd_output);
assert_eq!(pwd_path, env::current_dir().unwrap());
// Now create a temp dir and make sure we can set dir to it. This
// also tests the interaction of `dir` and relative exe paths.
let dir = tempfile::tempdir().unwrap();
let pwd_output = pwd.dir(dir.path()).read().unwrap();
let pwd_path = Path::new(&pwd_output);
// pwd_path isn't totally canonical on Windows, because it
// doesn't have a prefix. Thus we have to canonicalize both
// sides. (This also handles symlinks in TMP_DIR.)
assert_eq!(
pwd_path.canonicalize().unwrap(),
dir.path().canonicalize().unwrap()
);
}
#[test]
fn test_env() {
let output = cmd!(path_to_exe("print_env"), "foo")
.env("foo", "bar")
.read()
.unwrap();
assert_eq!("bar", output);
}
#[test]
fn test_full_env() {
// Note that it's important that no other tests use this variable name,
// because the test runner is multithreaded.
let var_name = "TEST_FULL_ENV";
// Capture the parent env, and make sure it does *not* contain our variable.
let clean_env: HashMap<String, String> = env::vars().collect();
assert!(
!clean_env.contains_key(var_name),
"why is this variable set?"
);
// Run a child process with that map passed to full_env(). It should be guaranteed not to
// see our variable, regardless of any outer env() calls or changes in the parent.
let clean_child = cmd!(path_to_exe("print_env"), var_name).full_env(clean_env);
// Dirty the parent env. Should be suppressed.
env::set_var(var_name, "junk1");
// And make an outer env() call. Should also be suppressed.
let dirty_child = clean_child.env(var_name, "junk2");
// Check that neither of those have any effect.
let output = dirty_child.read().unwrap();
assert_eq!("", output);
}
#[test]
fn test_env_remove() {
// Set an environment variable in the parent. Note that it's important that
// no other tests use this variable name, because the test runner is
// multithreaded.
let var_name = "TEST_ENV_REMOVE";
env::set_var(var_name, "junk2");
// Run a command that observes the variable.
let output1 = cmd!(path_to_exe("print_env"), var_name).read().unwrap();
assert_eq!("junk2", output1);
// Run the same command with that variable removed.
let output2 = cmd!(path_to_exe("print_env"), var_name)
.env_remove(var_name)
.read()
.unwrap();
assert_eq!("", output2);
}
#[test]
fn test_env_remove_case_sensitivity() {
// Env var deletion is particularly sensitive to the differences in
// case-sensitivity between Unix and Windows. The semantics of env_remove
// in duct must *match the platform*.
// Set an environment variable in the parent. Note that it's important that
// no other tests use this variable name, because the test runner is
// multithreaded.
let var_name = "TEST_ENV_REMOVE_CASE_SENSITIVITY";
env::set_var(var_name, "abc123");
// Run a command that tries to clear the same variable, but in lowercase.
let output1 = cmd!(path_to_exe("print_env"), var_name)
.env_remove(var_name.to_lowercase())
.read()
.unwrap();
// Now try to clear that variable from the parent environment, again using
// lowercase, and run the same command without `env_remove`.
env::remove_var(var_name.to_lowercase());
let output2 = cmd!(path_to_exe("print_env"), var_name).read().unwrap();
// On Unix, env vars are case sensitive, and we don't expect either removal
// to have any effect. On Windows, they're insensitive, and we expect both
// removals to work. The key thing is that both approaches to removal have
// the *same effect*.
assert_eq!(output1, output2, "failed to match platform behavior!!!");
// Go ahead and assert the exact expected output, just in case. If these
// assertions ever break, it might be this test's fault and not the code's.
if cfg!(windows) {
assert_eq!(output1, "");
} else {
assert_eq!(output1, "abc123");
}
}
#[test]
fn test_broken_pipe() {
// If the input writing thread fills up its pipe buffer, writing will block. If the process
// on the other end of the pipe exits while writer is waiting, the write will return an
// error. We need to swallow that error, rather than returning it.
let myvec = vec![0; 1_000_000];
true_cmd().stdin_bytes(myvec).run().unwrap();
}
#[test]
fn test_silly() {
// A silly test, purely for coverage.
crate::IoValue::Null.try_clone().unwrap();
}
#[test]
fn test_path_sanitization() {
// We don't do any chdir'ing in this process, because the tests runner is multithreaded,
// and we don't want to screw up anyone else's relative paths. Instead, we shell out to a
// small test process that does that for us.
cmd!(path_to_exe("exe_in_dir"), path_to_exe("status"), "0")
.run()
.unwrap();
}
#[test]
fn test_before_spawn_hook() {
let (reader, mut writer) = os_pipe::pipe().unwrap();
let expr = cmd!(path_to_exe("cat")).before_spawn(move |cmd| {
let reader_clone = reader.try_clone()?;
cmd.stdin(reader_clone);
Ok(())
});
writer.write_all(b"foobar").unwrap();
drop(writer);
let output = expr.read().unwrap();
assert_eq!("foobar", output);
}
#[test]
fn test_trailing_comma() {
let output = cmd!(path_to_exe("echo"), "trailing",).read().unwrap();
assert_eq!("trailing", output);
}
#[test]
fn test_no_argument() {
let output = cmd!(path_to_exe("echo")).read().unwrap();
assert_eq!("", output);
}
#[test]
fn test_dropping_reader() {
// Use an explicit stderr pipe to test the ReaderHandle's drop behavior.
let (mut stderr_reader, stderr_writer) = os_pipe::pipe().unwrap();
let mut reader_handle = cmd!(path_to_exe("sleep"), "1000000")
.stdout_file(stderr_writer)
.reader()
.unwrap();
// A zero-length read doesn't block.
let n = reader_handle.read(&mut []).unwrap();
assert_eq!(n, 0);
// Try-wait returns None.
let output = reader_handle.try_wait().unwrap();
assert!(output.is_none());
// Now we drop the reader. This kills the child.
drop(reader_handle);
// Now that the child is killed, reading the stderr pipe will not block.
// (Note that our copy was closed when the temporary Expression above
// dropped.)
let mut stderr = Vec::new();
let n = stderr_reader.read_to_end(&mut stderr).unwrap();
assert_eq!(n, 0);
}
#[test]
fn test_kill_with_grandchild() -> io::Result<()> {
// We're going to start a child process, and that child is going to start a
// grandchild. The grandchild is going to sleep forever (1 day). We'll read
// some output from the child to make sure it's done starting the
// grandchild, and then we'll kill the child. Now, the grandchild will not
// be killed, and it will still hold a write handle to the stdout pipe. So
// this tests that the wait done by kill only waits on the child to exit,
// and does not wait on IO to finish.
//
// This test leaks the grandchild process. I'm sorry.
// Capturing stderr means an IO thread is spawned, even though we're using
// a ReaderHandle to read stdout. What we're testing here is that kill()
// doesn't wait on that IO thread.
let mut reader = cmd!(path_to_exe("child_grandchild"))
.stderr_capture()
.reader()?;
// Read "started" from the child to make sure we don't kill it before it
// starts the grandchild.
let mut started_read = [0; 7];
reader.read_exact(&mut started_read)?;
assert_eq!(&started_read, b"started");
// Ok, this had better not block!
reader.kill()
}
#[test]
fn test_debug_format() {
let e = cmd!("foo", "bar", "baz").pipe(cmd!("bing", "bong"));
assert_eq!(
format!("{:?}", e),
r#"Pipe(Cmd(["foo", "bar", "baz"]), Cmd(["bing", "bong"]))"#,
);
}
#[test]
fn test_reader_try_wait() -> io::Result<()> {
// Create a ReaderHandle for a cat process. Give cat 1 MB of data to echo
// back to us, so that it will block on its stdout pipe until we start
// reading.
let bytes = vec![42; 1_000_000];
let mut cat_reader = cmd!(path_to_exe("cat"))
.stdin_bytes(bytes.clone())
.reader()?;
assert!(cat_reader.try_wait()?.is_none());
let mut output = Vec::new();
cat_reader.read_to_end(&mut output)?;
assert_eq!(output, bytes);
let output = cat_reader.try_wait()?.expect("is some");
assert!(output.status.success());
assert!(output.stdout.is_empty());
assert!(output.stderr.is_empty());
Ok(())
}
#[test]
fn test_pids() -> io::Result<()> {
let handle = true_cmd().start()?;
let pids = handle.pids();
assert_eq!(pids.len(), 1);
handle.wait()?;
let reader = true_cmd().reader()?;
let pids = reader.pids();
assert_eq!(pids.len(), 1);
std::io::copy(&mut &reader, &mut std::io::sink())?;
let handle = true_cmd()
.pipe(true_cmd().stdout_null().pipe(true_cmd()))
.start()?;
let pids = handle.pids();
assert_eq!(pids.len(), 3);
handle.wait()?;
let reader = true_cmd()
.pipe(true_cmd().stdout_null().pipe(true_cmd()))
.reader()?;
let pids = reader.pids();
assert_eq!(pids.len(), 3);
std::io::copy(&mut &reader, &mut std::io::sink())?;
Ok(())
}
|
test_sh
|
identifier_name
|
test.rs
|
use super::{cmd, Expression};
use std;
use std::collections::HashMap;
use std::env;
use std::env::consts::EXE_EXTENSION;
use std::ffi::OsString;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::Command;
use std::str;
use std::sync::{Arc, Once};
// Include a copy of the sh function, because we have a lot of old tests that
// use it, and it's a lot easier than managing a circular dependency between
// duct and duct_sh.
pub fn sh(command: &'static str) -> Expression {
let argv = shell_command_argv(command.into());
cmd(&argv[0], &argv[1..])
}
#[cfg(unix)]
fn shell_command_argv(command: OsString) -> Vec<OsString> {
vec!["/bin/sh".into(), "-c".into(), command]
}
#[cfg(windows)]
fn shell_command_argv(command: OsString) -> Vec<OsString> {
let comspec = std::env::var_os("COMSPEC").unwrap_or_else(|| "cmd.exe".into());
vec![comspec, "/C".into(), command]
}
pub fn path_to_exe(name: &str) -> PathBuf {
// This project defines some associated binaries for testing, and we shell out to them in
// these tests. `cargo test` doesn't automatically build associated binaries, so this
// function takes care of building them explicitly.
static CARGO_BUILD_ONCE: Once = Once::new();
CARGO_BUILD_ONCE.call_once(|| {
let build_status = Command::new("cargo")
.arg("build")
.arg("--quiet")
.status()
.unwrap();
assert!(
build_status.success(),
"Cargo failed to build associated binaries."
);
});
Path::new("target")
.join("debug")
.join(name)
.with_extension(EXE_EXTENSION)
}
pub fn true_cmd() -> Expression {
cmd!(path_to_exe("status"), "0")
}
fn false_cmd() -> Expression {
cmd!(path_to_exe("status"), "1")
}
#[test]
fn test_cmd() {
let output = cmd!(path_to_exe("echo"), "hi").read().unwrap();
assert_eq!("hi", output);
}
#[test]
fn test_sh() {
// Windows compatible.
let output = sh("echo hi").read().unwrap();
assert_eq!("hi", output);
}
#[test]
fn test_start() {
let handle1 = cmd!(path_to_exe("echo"), "hi")
.stdout_capture()
.start()
.unwrap();
let handle2 = cmd!(path_to_exe("echo"), "lo")
.stdout_capture()
.start()
.unwrap();
let output1 = handle1.wait().unwrap();
let output2 = handle2.wait().unwrap();
assert_eq!("hi", str::from_utf8(&output1.stdout).unwrap().trim());
assert_eq!("lo", str::from_utf8(&output2.stdout).unwrap().trim());
}
#[test]
fn test_error() {
let result = false_cmd().run();
if let Err(err) = result {
assert_eq!(err.kind(), io::ErrorKind::Other);
} else {
panic!("Expected a status error.");
}
}
#[test]
fn test_unchecked() {
let unchecked_false = false_cmd().unchecked();
// Unchecked errors shouldn't cause `run` to return an error.
let output = unchecked_false
.pipe(cmd!(path_to_exe("echo"), "waa"))
.stdout_capture()
.run()
.unwrap();
// The value of the exit code is preserved.
assert_eq!(1, output.status.code().unwrap());
assert_eq!("waa", String::from_utf8_lossy(&output.stdout).trim());
}
#[test]
fn test_unchecked_in_pipe() {
let zero = cmd!(path_to_exe("status"), "0");
let one = cmd!(path_to_exe("status"), "1");
let two = cmd!(path_to_exe("status"), "2");
// Right takes precedence over left.
let output = one.pipe(two.clone()).unchecked().run().unwrap();
assert_eq!(2, output.status.code().unwrap());
// Except that checked on the left takes precedence over unchecked on
// the right.
let output = one.pipe(two.unchecked()).unchecked().run().unwrap();
assert_eq!(1, output.status.code().unwrap());
// Right takes precedence over the left again if they're both unchecked.
let output = one
.unchecked()
.pipe(two.unchecked())
.unchecked()
.run()
.unwrap();
assert_eq!(2, output.status.code().unwrap());
// Except that if the right is a success, the left takes precedence.
let output = one
.unchecked()
.pipe(zero.unchecked())
.unchecked()
.run()
.unwrap();
assert_eq!(1, output.status.code().unwrap());
// Even if the right is checked.
let output = one.unchecked().pipe(zero).unchecked().run().unwrap();
assert_eq!(1, output.status.code().unwrap());
}
#[test]
fn test_pipe() {
let output = sh("echo xxx")
.pipe(cmd!(path_to_exe("x_to_y")))
.read()
.unwrap();
assert_eq!("yyy", output);
// Check that errors on either side are propagated.
let result = true_cmd().pipe(false_cmd()).run();
assert!(result.is_err());
let result = false_cmd().pipe(true_cmd()).run();
assert!(result.is_err());
}
#[test]
fn test_pipe_with_kill() {
// Make sure both sides get killed.
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Note that we don't use unchecked() here. This tests that kill suppresses
// exit status errors.
let handle = sleep_cmd.pipe(sleep_cmd.clone()).start().unwrap();
handle.kill().unwrap();
// But calling wait again should be an error, because of the status.
handle.wait().unwrap_err();
}
#[test]
fn test_pipe_start() {
let nonexistent_cmd = cmd!(path_to_exe("nonexistent!!!"));
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Errors starting the left side of a pipe are returned immediately, and
// the right side is never started.
nonexistent_cmd.pipe(&sleep_cmd).start().unwrap_err();
// Errors starting the right side are also returned immediately, and the
// the left side is killed first.
sleep_cmd.pipe(nonexistent_cmd).start().unwrap_err();
}
#[test]
fn test_multiple_threads() {
// Wait on the sleep command in a background thread, while the main thread
// kills it.
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
let handle = Arc::new(sleep_cmd.unchecked().start().unwrap());
let arc_clone = handle.clone();
let wait_thread = std::thread::spawn(move || {
arc_clone.wait().unwrap();
});
handle.kill().unwrap();
wait_thread.join().unwrap();
}
#[test]
fn test_nonblocking_waits() {
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Make sure pipelines handle try_wait correctly.
let handle = sleep_cmd.pipe(&sleep_cmd).unchecked().start().unwrap();
// Make sure try_wait doesn't block on it.
assert!(handle.try_wait().unwrap().is_none());
handle.kill().unwrap();
}
#[test]
fn test_input() {
let expr = cmd!(path_to_exe("x_to_y")).stdin_bytes("xxx");
let output = expr.read().unwrap();
assert_eq!("yyy", output);
}
#[test]
fn test_stderr() {
let (mut reader, writer) = ::os_pipe::pipe().unwrap();
sh("echo hi>&2").stderr_file(writer).run().unwrap();
let mut s = String::new();
reader.read_to_string(&mut s).unwrap();
assert_eq!(s.trim(), "hi");
}
#[test]
fn test_null() {
let expr = cmd!(path_to_exe("cat"))
.stdin_null()
.stdout_null()
.stderr_null();
let output = expr.read().unwrap();
assert_eq!("", output);
}
#[test]
fn test_path() {
let dir = tempfile::tempdir().unwrap();
let input_file = dir.path().join("input_file");
let output_file = dir.path().join("output_file");
File::create(&input_file)
.unwrap()
.write_all(b"xxx")
.unwrap();
let expr = cmd!(path_to_exe("x_to_y"))
.stdin_path(&input_file)
.stdout_path(&output_file);
let output = expr.read().unwrap();
assert_eq!("", output);
let mut file_output = String::new();
File::open(&output_file)
.unwrap()
.read_to_string(&mut file_output)
.unwrap();
assert_eq!("yyy", file_output);
}
#[test]
fn test_swapping() {
let output = sh("echo hi")
.stdout_to_stderr()
.stderr_capture()
.run()
.unwrap();
let stderr = str::from_utf8(&output.stderr).unwrap().trim();
assert_eq!("hi", stderr);
// Windows compatible. (Requires no space before the ">".)
let output = sh("echo hi>&2").stderr_to_stdout().read().unwrap();
assert_eq!("hi", output);
}
#[test]
fn test_file() {
let dir = tempfile::tempdir().unwrap();
let file = dir.path().join("file");
File::create(&file).unwrap().write_all(b"example").unwrap();
let expr = cmd!(path_to_exe("cat")).stdin_file(File::open(&file).unwrap());
let output = expr.read().unwrap();
assert_eq!(output, "example");
}
#[test]
fn test_ergonomics() {
let mystr = "owned string".to_owned();
let mypathbuf = Path::new("a/b/c").to_owned();
let myvec = vec![1, 2, 3];
// These are nonsense expressions. We just want to make sure they compile.
let _ = sh("true")
.stdin_path(&*mystr)
.stdin_bytes(&*myvec)
.stdout_path(&*mypathbuf);
let _ = sh("true")
.stdin_path(mystr)
.stdin_bytes(myvec)
.stdout_path(mypathbuf);
// Unfortunately, this one doesn't work with our Into<Vec<u8>> bound on input().
// TODO: Is it worth having these impls for &Vec in other cases?
// let _ = sh("true").stdin_path(&mystr).stdin_bytes(&myvec).stdout_path(&mypathbuf);
}
#[test]
fn test_capture_both() {
// Windows compatible, no space before ">", and we trim newlines at the end to avoid
// dealing with the different kinds.
let output = sh("echo hi && echo lo>&2")
.stdout_capture()
.stderr_capture()
.run()
.unwrap();
assert_eq!("hi", str::from_utf8(&output.stdout).unwrap().trim());
assert_eq!("lo", str::from_utf8(&output.stderr).unwrap().trim());
}
#[test]
fn test_dir() {
// This test checks the interaction of `dir` and relative exe paths.
// Make sure that's actually what we're testing.
let pwd_path = path_to_exe("pwd");
assert!(pwd_path.is_relative());
let pwd = cmd!(pwd_path);
// First assert that ordinary commands happen in the parent's dir.
let pwd_output = pwd.read().unwrap();
let pwd_path = Path::new(&pwd_output);
assert_eq!(pwd_path, env::current_dir().unwrap());
// Now create a temp dir and make sure we can set dir to it. This
// also tests the interaction of `dir` and relative exe paths.
let dir = tempfile::tempdir().unwrap();
let pwd_output = pwd.dir(dir.path()).read().unwrap();
let pwd_path = Path::new(&pwd_output);
// pwd_path isn't totally canonical on Windows, because it
// doesn't have a prefix. Thus we have to canonicalize both
// sides. (This also handles symlinks in TMP_DIR.)
assert_eq!(
pwd_path.canonicalize().unwrap(),
dir.path().canonicalize().unwrap()
);
}
#[test]
fn test_env() {
let output = cmd!(path_to_exe("print_env"), "foo")
.env("foo", "bar")
.read()
.unwrap();
assert_eq!("bar", output);
}
#[test]
fn test_full_env() {
// Note that it's important that no other tests use this variable name,
// because the test runner is multithreaded.
let var_name = "TEST_FULL_ENV";
// Capture the parent env, and make sure it does *not* contain our variable.
let clean_env: HashMap<String, String> = env::vars().collect();
assert!(
!clean_env.contains_key(var_name),
"why is this variable set?"
);
// Run a child process with that map passed to full_env(). It should be guaranteed not to
// see our variable, regardless of any outer env() calls or changes in the parent.
let clean_child = cmd!(path_to_exe("print_env"), var_name).full_env(clean_env);
// Dirty the parent env. Should be suppressed.
env::set_var(var_name, "junk1");
// And make an outer env() call. Should also be suppressed.
let dirty_child = clean_child.env(var_name, "junk2");
// Check that neither of those have any effect.
let output = dirty_child.read().unwrap();
assert_eq!("", output);
}
#[test]
fn test_env_remove() {
// Set an environment variable in the parent. Note that it's important that
// no other tests use this variable name, because the test runner is
// multithreaded.
let var_name = "TEST_ENV_REMOVE";
env::set_var(var_name, "junk2");
// Run a command that observes the variable.
let output1 = cmd!(path_to_exe("print_env"), var_name).read().unwrap();
assert_eq!("junk2", output1);
// Run the same command with that variable removed.
let output2 = cmd!(path_to_exe("print_env"), var_name)
.env_remove(var_name)
.read()
.unwrap();
assert_eq!("", output2);
}
#[test]
fn test_env_remove_case_sensitivity() {
// Env var deletion is particularly sensitive to the differences in
// case-sensitivity between Unix and Windows. The semantics of env_remove
// in duct must *match the platform*.
// Set an environment variable in the parent. Note that it's important that
// no other tests use this variable name, because the test runner is
// multithreaded.
let var_name = "TEST_ENV_REMOVE_CASE_SENSITIVITY";
env::set_var(var_name, "abc123");
// Run a command that tries to clear the same variable, but in lowercase.
let output1 = cmd!(path_to_exe("print_env"), var_name)
.env_remove(var_name.to_lowercase())
.read()
.unwrap();
// Now try to clear that variable from the parent environment, again using
// lowercase, and run the same command without `env_remove`.
env::remove_var(var_name.to_lowercase());
let output2 = cmd!(path_to_exe("print_env"), var_name).read().unwrap();
// On Unix, env vars are case sensitive, and we don't expect either removal
// to have any effect. On Windows, they're insensitive, and we expect both
// removals to work. The key thing is that both approaches to removal have
// the *same effect*.
assert_eq!(output1, output2, "failed to match platform behavior!!!");
// Go ahead and assert the exact expected output, just in case. If these
// assertions ever break, it might be this test's fault and not the code's.
if cfg!(windows)
|
else {
assert_eq!(output1, "abc123");
}
}
#[test]
fn test_broken_pipe() {
// If the input writing thread fills up its pipe buffer, writing will block. If the process
// on the other end of the pipe exits while writer is waiting, the write will return an
// error. We need to swallow that error, rather than returning it.
let myvec = vec![0; 1_000_000];
true_cmd().stdin_bytes(myvec).run().unwrap();
}
#[test]
fn test_silly() {
// A silly test, purely for coverage.
crate::IoValue::Null.try_clone().unwrap();
}
#[test]
fn test_path_sanitization() {
// We don't do any chdir'ing in this process, because the tests runner is multithreaded,
// and we don't want to screw up anyone else's relative paths. Instead, we shell out to a
// small test process that does that for us.
cmd!(path_to_exe("exe_in_dir"), path_to_exe("status"), "0")
.run()
.unwrap();
}
#[test]
fn test_before_spawn_hook() {
let (reader, mut writer) = os_pipe::pipe().unwrap();
let expr = cmd!(path_to_exe("cat")).before_spawn(move |cmd| {
let reader_clone = reader.try_clone()?;
cmd.stdin(reader_clone);
Ok(())
});
writer.write_all(b"foobar").unwrap();
drop(writer);
let output = expr.read().unwrap();
assert_eq!("foobar", output);
}
#[test]
fn test_trailing_comma() {
let output = cmd!(path_to_exe("echo"), "trailing",).read().unwrap();
assert_eq!("trailing", output);
}
#[test]
fn test_no_argument() {
let output = cmd!(path_to_exe("echo")).read().unwrap();
assert_eq!("", output);
}
#[test]
fn test_dropping_reader() {
// Use an explicit stderr pipe to test the ReaderHandle's drop behavior.
let (mut stderr_reader, stderr_writer) = os_pipe::pipe().unwrap();
let mut reader_handle = cmd!(path_to_exe("sleep"), "1000000")
.stdout_file(stderr_writer)
.reader()
.unwrap();
// A zero-length read doesn't block.
let n = reader_handle.read(&mut []).unwrap();
assert_eq!(n, 0);
// Try-wait returns None.
let output = reader_handle.try_wait().unwrap();
assert!(output.is_none());
// Now we drop the reader. This kills the child.
drop(reader_handle);
// Now that the child is killed, reading the stderr pipe will not block.
// (Note that our copy was closed when the temporary Expression above
// dropped.)
let mut stderr = Vec::new();
let n = stderr_reader.read_to_end(&mut stderr).unwrap();
assert_eq!(n, 0);
}
#[test]
fn test_kill_with_grandchild() -> io::Result<()> {
// We're going to start a child process, and that child is going to start a
// grandchild. The grandchild is going to sleep forever (1 day). We'll read
// some output from the child to make sure it's done starting the
// grandchild, and then we'll kill the child. Now, the grandchild will not
// be killed, and it will still hold a write handle to the stdout pipe. So
// this tests that the wait done by kill only waits on the child to exit,
// and does not wait on IO to finish.
//
// This test leaks the grandchild process. I'm sorry.
// Capturing stderr means an IO thread is spawned, even though we're using
// a ReaderHandle to read stdout. What we're testing here is that kill()
// doesn't wait on that IO thread.
let mut reader = cmd!(path_to_exe("child_grandchild"))
.stderr_capture()
.reader()?;
// Read "started" from the child to make sure we don't kill it before it
// starts the grandchild.
let mut started_read = [0; 7];
reader.read_exact(&mut started_read)?;
assert_eq!(&started_read, b"started");
// Ok, this had better not block!
reader.kill()
}
#[test]
fn test_debug_format() {
let e = cmd!("foo", "bar", "baz").pipe(cmd!("bing", "bong"));
assert_eq!(
format!("{:?}", e),
r#"Pipe(Cmd(["foo", "bar", "baz"]), Cmd(["bing", "bong"]))"#,
);
}
#[test]
fn test_reader_try_wait() -> io::Result<()> {
// Create a ReaderHandle for a cat process. Give cat 1 MB of data to echo
// back to us, so that it will block on its stdout pipe until we start
// reading.
let bytes = vec![42; 1_000_000];
let mut cat_reader = cmd!(path_to_exe("cat"))
.stdin_bytes(bytes.clone())
.reader()?;
assert!(cat_reader.try_wait()?.is_none());
let mut output = Vec::new();
cat_reader.read_to_end(&mut output)?;
assert_eq!(output, bytes);
let output = cat_reader.try_wait()?.expect("is some");
assert!(output.status.success());
assert!(output.stdout.is_empty());
assert!(output.stderr.is_empty());
Ok(())
}
#[test]
fn test_pids() -> io::Result<()> {
let handle = true_cmd().start()?;
let pids = handle.pids();
assert_eq!(pids.len(), 1);
handle.wait()?;
let reader = true_cmd().reader()?;
let pids = reader.pids();
assert_eq!(pids.len(), 1);
std::io::copy(&mut &reader, &mut std::io::sink())?;
let handle = true_cmd()
.pipe(true_cmd().stdout_null().pipe(true_cmd()))
.start()?;
let pids = handle.pids();
assert_eq!(pids.len(), 3);
handle.wait()?;
let reader = true_cmd()
.pipe(true_cmd().stdout_null().pipe(true_cmd()))
.reader()?;
let pids = reader.pids();
assert_eq!(pids.len(), 3);
std::io::copy(&mut &reader, &mut std::io::sink())?;
Ok(())
}
|
{
assert_eq!(output1, "");
}
|
conditional_block
|
test.rs
|
use super::{cmd, Expression};
use std;
use std::collections::HashMap;
use std::env;
use std::env::consts::EXE_EXTENSION;
use std::ffi::OsString;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::Command;
use std::str;
use std::sync::{Arc, Once};
// Include a copy of the sh function, because we have a lot of old tests that
// use it, and it's a lot easier than managing a circular dependency between
// duct and duct_sh.
pub fn sh(command: &'static str) -> Expression {
let argv = shell_command_argv(command.into());
cmd(&argv[0], &argv[1..])
}
#[cfg(unix)]
fn shell_command_argv(command: OsString) -> Vec<OsString> {
vec!["/bin/sh".into(), "-c".into(), command]
}
#[cfg(windows)]
fn shell_command_argv(command: OsString) -> Vec<OsString> {
let comspec = std::env::var_os("COMSPEC").unwrap_or_else(|| "cmd.exe".into());
vec![comspec, "/C".into(), command]
}
pub fn path_to_exe(name: &str) -> PathBuf {
// This project defines some associated binaries for testing, and we shell out to them in
// these tests. `cargo test` doesn't automatically build associated binaries, so this
// function takes care of building them explicitly.
static CARGO_BUILD_ONCE: Once = Once::new();
CARGO_BUILD_ONCE.call_once(|| {
let build_status = Command::new("cargo")
.arg("build")
.arg("--quiet")
.status()
.unwrap();
assert!(
build_status.success(),
"Cargo failed to build associated binaries."
);
});
Path::new("target")
.join("debug")
.join(name)
.with_extension(EXE_EXTENSION)
}
pub fn true_cmd() -> Expression {
cmd!(path_to_exe("status"), "0")
}
fn false_cmd() -> Expression {
cmd!(path_to_exe("status"), "1")
}
#[test]
fn test_cmd() {
let output = cmd!(path_to_exe("echo"), "hi").read().unwrap();
assert_eq!("hi", output);
}
#[test]
fn test_sh() {
// Windows compatible.
let output = sh("echo hi").read().unwrap();
assert_eq!("hi", output);
}
#[test]
fn test_start() {
let handle1 = cmd!(path_to_exe("echo"), "hi")
|
let handle2 = cmd!(path_to_exe("echo"), "lo")
.stdout_capture()
.start()
.unwrap();
let output1 = handle1.wait().unwrap();
let output2 = handle2.wait().unwrap();
assert_eq!("hi", str::from_utf8(&output1.stdout).unwrap().trim());
assert_eq!("lo", str::from_utf8(&output2.stdout).unwrap().trim());
}
#[test]
fn test_error() {
let result = false_cmd().run();
if let Err(err) = result {
assert_eq!(err.kind(), io::ErrorKind::Other);
} else {
panic!("Expected a status error.");
}
}
#[test]
fn test_unchecked() {
let unchecked_false = false_cmd().unchecked();
// Unchecked errors shouldn't cause `run` to return an error.
let output = unchecked_false
.pipe(cmd!(path_to_exe("echo"), "waa"))
.stdout_capture()
.run()
.unwrap();
// The value of the exit code is preserved.
assert_eq!(1, output.status.code().unwrap());
assert_eq!("waa", String::from_utf8_lossy(&output.stdout).trim());
}
#[test]
fn test_unchecked_in_pipe() {
let zero = cmd!(path_to_exe("status"), "0");
let one = cmd!(path_to_exe("status"), "1");
let two = cmd!(path_to_exe("status"), "2");
// Right takes precedence over left.
let output = one.pipe(two.clone()).unchecked().run().unwrap();
assert_eq!(2, output.status.code().unwrap());
// Except that checked on the left takes precedence over unchecked on
// the right.
let output = one.pipe(two.unchecked()).unchecked().run().unwrap();
assert_eq!(1, output.status.code().unwrap());
// Right takes precedence over the left again if they're both unchecked.
let output = one
.unchecked()
.pipe(two.unchecked())
.unchecked()
.run()
.unwrap();
assert_eq!(2, output.status.code().unwrap());
// Except that if the right is a success, the left takes precedence.
let output = one
.unchecked()
.pipe(zero.unchecked())
.unchecked()
.run()
.unwrap();
assert_eq!(1, output.status.code().unwrap());
// Even if the right is checked.
let output = one.unchecked().pipe(zero).unchecked().run().unwrap();
assert_eq!(1, output.status.code().unwrap());
}
#[test]
fn test_pipe() {
let output = sh("echo xxx")
.pipe(cmd!(path_to_exe("x_to_y")))
.read()
.unwrap();
assert_eq!("yyy", output);
// Check that errors on either side are propagated.
let result = true_cmd().pipe(false_cmd()).run();
assert!(result.is_err());
let result = false_cmd().pipe(true_cmd()).run();
assert!(result.is_err());
}
#[test]
fn test_pipe_with_kill() {
// Make sure both sides get killed.
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Note that we don't use unchecked() here. This tests that kill suppresses
// exit status errors.
let handle = sleep_cmd.pipe(sleep_cmd.clone()).start().unwrap();
handle.kill().unwrap();
// But calling wait again should be an error, because of the status.
handle.wait().unwrap_err();
}
#[test]
fn test_pipe_start() {
let nonexistent_cmd = cmd!(path_to_exe("nonexistent!!!"));
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Errors starting the left side of a pipe are returned immediately, and
// the right side is never started.
nonexistent_cmd.pipe(&sleep_cmd).start().unwrap_err();
// Errors starting the right side are also returned immediately, and the
// the left side is killed first.
sleep_cmd.pipe(nonexistent_cmd).start().unwrap_err();
}
#[test]
fn test_multiple_threads() {
// Wait on the sleep command in a background thread, while the main thread
// kills it.
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
let handle = Arc::new(sleep_cmd.unchecked().start().unwrap());
let arc_clone = handle.clone();
let wait_thread = std::thread::spawn(move || {
arc_clone.wait().unwrap();
});
handle.kill().unwrap();
wait_thread.join().unwrap();
}
#[test]
fn test_nonblocking_waits() {
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Make sure pipelines handle try_wait correctly.
let handle = sleep_cmd.pipe(&sleep_cmd).unchecked().start().unwrap();
// Make sure try_wait doesn't block on it.
assert!(handle.try_wait().unwrap().is_none());
handle.kill().unwrap();
}
#[test]
fn test_input() {
let expr = cmd!(path_to_exe("x_to_y")).stdin_bytes("xxx");
let output = expr.read().unwrap();
assert_eq!("yyy", output);
}
#[test]
fn test_stderr() {
let (mut reader, writer) = ::os_pipe::pipe().unwrap();
sh("echo hi>&2").stderr_file(writer).run().unwrap();
let mut s = String::new();
reader.read_to_string(&mut s).unwrap();
assert_eq!(s.trim(), "hi");
}
#[test]
fn test_null() {
let expr = cmd!(path_to_exe("cat"))
.stdin_null()
.stdout_null()
.stderr_null();
let output = expr.read().unwrap();
assert_eq!("", output);
}
#[test]
fn test_path() {
let dir = tempfile::tempdir().unwrap();
let input_file = dir.path().join("input_file");
let output_file = dir.path().join("output_file");
File::create(&input_file)
.unwrap()
.write_all(b"xxx")
.unwrap();
let expr = cmd!(path_to_exe("x_to_y"))
.stdin_path(&input_file)
.stdout_path(&output_file);
let output = expr.read().unwrap();
assert_eq!("", output);
let mut file_output = String::new();
File::open(&output_file)
.unwrap()
.read_to_string(&mut file_output)
.unwrap();
assert_eq!("yyy", file_output);
}
#[test]
fn test_swapping() {
let output = sh("echo hi")
.stdout_to_stderr()
.stderr_capture()
.run()
.unwrap();
let stderr = str::from_utf8(&output.stderr).unwrap().trim();
assert_eq!("hi", stderr);
// Windows compatible. (Requires no space before the ">".)
let output = sh("echo hi>&2").stderr_to_stdout().read().unwrap();
assert_eq!("hi", output);
}
#[test]
fn test_file() {
let dir = tempfile::tempdir().unwrap();
let file = dir.path().join("file");
File::create(&file).unwrap().write_all(b"example").unwrap();
let expr = cmd!(path_to_exe("cat")).stdin_file(File::open(&file).unwrap());
let output = expr.read().unwrap();
assert_eq!(output, "example");
}
#[test]
fn test_ergonomics() {
let mystr = "owned string".to_owned();
let mypathbuf = Path::new("a/b/c").to_owned();
let myvec = vec![1, 2, 3];
// These are nonsense expressions. We just want to make sure they compile.
let _ = sh("true")
.stdin_path(&*mystr)
.stdin_bytes(&*myvec)
.stdout_path(&*mypathbuf);
let _ = sh("true")
.stdin_path(mystr)
.stdin_bytes(myvec)
.stdout_path(mypathbuf);
// Unfortunately, this one doesn't work with our Into<Vec<u8>> bound on input().
// TODO: Is it worth having these impls for &Vec in other cases?
// let _ = sh("true").stdin_path(&mystr).stdin_bytes(&myvec).stdout_path(&mypathbuf);
}
#[test]
fn test_capture_both() {
// Windows compatible, no space before ">", and we trim newlines at the end to avoid
// dealing with the different kinds.
let output = sh("echo hi && echo lo>&2")
.stdout_capture()
.stderr_capture()
.run()
.unwrap();
assert_eq!("hi", str::from_utf8(&output.stdout).unwrap().trim());
assert_eq!("lo", str::from_utf8(&output.stderr).unwrap().trim());
}
#[test]
fn test_dir() {
// This test checks the interaction of `dir` and relative exe paths.
// Make sure that's actually what we're testing.
let pwd_path = path_to_exe("pwd");
assert!(pwd_path.is_relative());
let pwd = cmd!(pwd_path);
// First assert that ordinary commands happen in the parent's dir.
let pwd_output = pwd.read().unwrap();
let pwd_path = Path::new(&pwd_output);
assert_eq!(pwd_path, env::current_dir().unwrap());
// Now create a temp dir and make sure we can set dir to it. This
// also tests the interaction of `dir` and relative exe paths.
let dir = tempfile::tempdir().unwrap();
let pwd_output = pwd.dir(dir.path()).read().unwrap();
let pwd_path = Path::new(&pwd_output);
// pwd_path isn't totally canonical on Windows, because it
// doesn't have a prefix. Thus we have to canonicalize both
// sides. (This also handles symlinks in TMP_DIR.)
assert_eq!(
pwd_path.canonicalize().unwrap(),
dir.path().canonicalize().unwrap()
);
}
#[test]
fn test_env() {
let output = cmd!(path_to_exe("print_env"), "foo")
.env("foo", "bar")
.read()
.unwrap();
assert_eq!("bar", output);
}
#[test]
fn test_full_env() {
// Note that it's important that no other tests use this variable name,
// because the test runner is multithreaded.
let var_name = "TEST_FULL_ENV";
// Capture the parent env, and make sure it does *not* contain our variable.
let clean_env: HashMap<String, String> = env::vars().collect();
assert!(
!clean_env.contains_key(var_name),
"why is this variable set?"
);
// Run a child process with that map passed to full_env(). It should be guaranteed not to
// see our variable, regardless of any outer env() calls or changes in the parent.
let clean_child = cmd!(path_to_exe("print_env"), var_name).full_env(clean_env);
// Dirty the parent env. Should be suppressed.
env::set_var(var_name, "junk1");
// And make an outer env() call. Should also be suppressed.
let dirty_child = clean_child.env(var_name, "junk2");
// Check that neither of those have any effect.
let output = dirty_child.read().unwrap();
assert_eq!("", output);
}
#[test]
fn test_env_remove() {
// Set an environment variable in the parent. Note that it's important that
// no other tests use this variable name, because the test runner is
// multithreaded.
let var_name = "TEST_ENV_REMOVE";
env::set_var(var_name, "junk2");
// Run a command that observes the variable.
let output1 = cmd!(path_to_exe("print_env"), var_name).read().unwrap();
assert_eq!("junk2", output1);
// Run the same command with that variable removed.
let output2 = cmd!(path_to_exe("print_env"), var_name)
.env_remove(var_name)
.read()
.unwrap();
assert_eq!("", output2);
}
#[test]
fn test_env_remove_case_sensitivity() {
// Env var deletion is particularly sensitive to the differences in
// case-sensitivity between Unix and Windows. The semantics of env_remove
// in duct must *match the platform*.
// Set an environment variable in the parent. Note that it's important that
// no other tests use this variable name, because the test runner is
// multithreaded.
let var_name = "TEST_ENV_REMOVE_CASE_SENSITIVITY";
env::set_var(var_name, "abc123");
// Run a command that tries to clear the same variable, but in lowercase.
let output1 = cmd!(path_to_exe("print_env"), var_name)
.env_remove(var_name.to_lowercase())
.read()
.unwrap();
// Now try to clear that variable from the parent environment, again using
// lowercase, and run the same command without `env_remove`.
env::remove_var(var_name.to_lowercase());
let output2 = cmd!(path_to_exe("print_env"), var_name).read().unwrap();
// On Unix, env vars are case sensitive, and we don't expect either removal
// to have any effect. On Windows, they're insensitive, and we expect both
// removals to work. The key thing is that both approaches to removal have
// the *same effect*.
assert_eq!(output1, output2, "failed to match platform behavior!!!");
// Go ahead and assert the exact expected output, just in case. If these
// assertions ever break, it might be this test's fault and not the code's.
if cfg!(windows) {
assert_eq!(output1, "");
} else {
assert_eq!(output1, "abc123");
}
}
#[test]
fn test_broken_pipe() {
// If the input writing thread fills up its pipe buffer, writing will block. If the process
// on the other end of the pipe exits while writer is waiting, the write will return an
// error. We need to swallow that error, rather than returning it.
let myvec = vec![0; 1_000_000];
true_cmd().stdin_bytes(myvec).run().unwrap();
}
#[test]
fn test_silly() {
// A silly test, purely for coverage.
crate::IoValue::Null.try_clone().unwrap();
}
#[test]
fn test_path_sanitization() {
// We don't do any chdir'ing in this process, because the tests runner is multithreaded,
// and we don't want to screw up anyone else's relative paths. Instead, we shell out to a
// small test process that does that for us.
cmd!(path_to_exe("exe_in_dir"), path_to_exe("status"), "0")
.run()
.unwrap();
}
#[test]
fn test_before_spawn_hook() {
let (reader, mut writer) = os_pipe::pipe().unwrap();
let expr = cmd!(path_to_exe("cat")).before_spawn(move |cmd| {
let reader_clone = reader.try_clone()?;
cmd.stdin(reader_clone);
Ok(())
});
writer.write_all(b"foobar").unwrap();
drop(writer);
let output = expr.read().unwrap();
assert_eq!("foobar", output);
}
#[test]
fn test_trailing_comma() {
let output = cmd!(path_to_exe("echo"), "trailing",).read().unwrap();
assert_eq!("trailing", output);
}
#[test]
fn test_no_argument() {
let output = cmd!(path_to_exe("echo")).read().unwrap();
assert_eq!("", output);
}
#[test]
fn test_dropping_reader() {
// Use an explicit stderr pipe to test the ReaderHandle's drop behavior.
let (mut stderr_reader, stderr_writer) = os_pipe::pipe().unwrap();
let mut reader_handle = cmd!(path_to_exe("sleep"), "1000000")
.stdout_file(stderr_writer)
.reader()
.unwrap();
// A zero-length read doesn't block.
let n = reader_handle.read(&mut []).unwrap();
assert_eq!(n, 0);
// Try-wait returns None.
let output = reader_handle.try_wait().unwrap();
assert!(output.is_none());
// Now we drop the reader. This kills the child.
drop(reader_handle);
// Now that the child is killed, reading the stderr pipe will not block.
// (Note that our copy was closed when the temporary Expression above
// dropped.)
let mut stderr = Vec::new();
let n = stderr_reader.read_to_end(&mut stderr).unwrap();
assert_eq!(n, 0);
}
#[test]
fn test_kill_with_grandchild() -> io::Result<()> {
// We're going to start a child process, and that child is going to start a
// grandchild. The grandchild is going to sleep forever (1 day). We'll read
// some output from the child to make sure it's done starting the
// grandchild, and then we'll kill the child. Now, the grandchild will not
// be killed, and it will still hold a write handle to the stdout pipe. So
// this tests that the wait done by kill only waits on the child to exit,
// and does not wait on IO to finish.
//
// This test leaks the grandchild process. I'm sorry.
// Capturing stderr means an IO thread is spawned, even though we're using
// a ReaderHandle to read stdout. What we're testing here is that kill()
// doesn't wait on that IO thread.
let mut reader = cmd!(path_to_exe("child_grandchild"))
.stderr_capture()
.reader()?;
// Read "started" from the child to make sure we don't kill it before it
// starts the grandchild.
let mut started_read = [0; 7];
reader.read_exact(&mut started_read)?;
assert_eq!(&started_read, b"started");
// Ok, this had better not block!
reader.kill()
}
#[test]
fn test_debug_format() {
let e = cmd!("foo", "bar", "baz").pipe(cmd!("bing", "bong"));
assert_eq!(
format!("{:?}", e),
r#"Pipe(Cmd(["foo", "bar", "baz"]), Cmd(["bing", "bong"]))"#,
);
}
#[test]
fn test_reader_try_wait() -> io::Result<()> {
// Create a ReaderHandle for a cat process. Give cat 1 MB of data to echo
// back to us, so that it will block on its stdout pipe until we start
// reading.
let bytes = vec![42; 1_000_000];
let mut cat_reader = cmd!(path_to_exe("cat"))
.stdin_bytes(bytes.clone())
.reader()?;
assert!(cat_reader.try_wait()?.is_none());
let mut output = Vec::new();
cat_reader.read_to_end(&mut output)?;
assert_eq!(output, bytes);
let output = cat_reader.try_wait()?.expect("is some");
assert!(output.status.success());
assert!(output.stdout.is_empty());
assert!(output.stderr.is_empty());
Ok(())
}
#[test]
fn test_pids() -> io::Result<()> {
let handle = true_cmd().start()?;
let pids = handle.pids();
assert_eq!(pids.len(), 1);
handle.wait()?;
let reader = true_cmd().reader()?;
let pids = reader.pids();
assert_eq!(pids.len(), 1);
std::io::copy(&mut &reader, &mut std::io::sink())?;
let handle = true_cmd()
.pipe(true_cmd().stdout_null().pipe(true_cmd()))
.start()?;
let pids = handle.pids();
assert_eq!(pids.len(), 3);
handle.wait()?;
let reader = true_cmd()
.pipe(true_cmd().stdout_null().pipe(true_cmd()))
.reader()?;
let pids = reader.pids();
assert_eq!(pids.len(), 3);
std::io::copy(&mut &reader, &mut std::io::sink())?;
Ok(())
}
|
.stdout_capture()
.start()
.unwrap();
|
random_line_split
|
test.rs
|
use super::{cmd, Expression};
use std;
use std::collections::HashMap;
use std::env;
use std::env::consts::EXE_EXTENSION;
use std::ffi::OsString;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::Command;
use std::str;
use std::sync::{Arc, Once};
// Include a copy of the sh function, because we have a lot of old tests that
// use it, and it's a lot easier than managing a circular dependency between
// duct and duct_sh.
pub fn sh(command: &'static str) -> Expression {
let argv = shell_command_argv(command.into());
cmd(&argv[0], &argv[1..])
}
#[cfg(unix)]
fn shell_command_argv(command: OsString) -> Vec<OsString> {
    // POSIX shells take the entire command string as the argument after `-c`.
    let mut argv: Vec<OsString> = vec![OsString::from("/bin/sh"), OsString::from("-c")];
    argv.push(command);
    argv
}
#[cfg(windows)]
fn shell_command_argv(command: OsString) -> Vec<OsString> {
    // Prefer the shell named by COMSPEC if set; otherwise fall back to cmd.exe.
    let shell = std::env::var_os("COMSPEC").unwrap_or_else(|| OsString::from("cmd.exe"));
    vec![shell, OsString::from("/C"), command]
}
/// Return the path of one of this project's helper test binaries, building
/// all of them first if that hasn't happened yet in this test run.
pub fn path_to_exe(name: &str) -> PathBuf {
    // These tests shell out to helper binaries defined by this project.
    // `cargo test` doesn't build associated binaries automatically, so we
    // build them explicitly here, exactly once per process.
    static CARGO_BUILD_ONCE: Once = Once::new();
    CARGO_BUILD_ONCE.call_once(|| {
        let status = Command::new("cargo")
            .arg("build")
            .arg("--quiet")
            .status()
            .unwrap();
        assert!(
            status.success(),
            "Cargo failed to build associated binaries."
        );
    });
    let mut exe_path = PathBuf::from("target");
    exe_path.push("debug");
    exe_path.push(name);
    exe_path.with_extension(EXE_EXTENSION)
}
/// An expression that always exits successfully (status 0), for tests.
pub fn true_cmd() -> Expression {
    cmd!(path_to_exe("status"), "0")
}
/// An expression that always exits unsuccessfully (status 1), for tests.
fn false_cmd() -> Expression {
    cmd!(path_to_exe("status"), "1")
}
#[test]
fn test_cmd() {
    // Smoke test: cmd! builds an argv directly (no shell) and read() captures
    // trimmed stdout.
    let output = cmd!(path_to_exe("echo"), "hi").read().unwrap();
    assert_eq!("hi", output);
}
#[test]
fn test_sh() {
    // Windows compatible.
    // sh() routes through the platform shell, so a plain `echo` works everywhere.
    let output = sh("echo hi").read().unwrap();
    assert_eq!("hi", output);
}
#[test]
fn test_start() {
    // Start two children concurrently; start() must not block, and each
    // handle's wait() yields that child's captured stdout.
    let echo_hi = cmd!(path_to_exe("echo"), "hi")
        .stdout_capture()
        .start()
        .unwrap();
    let echo_lo = cmd!(path_to_exe("echo"), "lo")
        .stdout_capture()
        .start()
        .unwrap();
    let out_hi = echo_hi.wait().unwrap();
    let out_lo = echo_lo.wait().unwrap();
    assert_eq!("hi", str::from_utf8(&out_hi.stdout).unwrap().trim());
    assert_eq!("lo", str::from_utf8(&out_lo.stdout).unwrap().trim());
}
#[test]
fn test_error() {
    // A nonzero child exit status surfaces as an io::Error of kind Other.
    let err = false_cmd().run().expect_err("Expected a status error.");
    assert_eq!(err.kind(), io::ErrorKind::Other);
}
#[test]
fn test_unchecked() {
let unchecked_false = false_cmd().unchecked();
// Unchecked errors shouldn't cause `run` to return an error.
let output = unchecked_false
.pipe(cmd!(path_to_exe("echo"), "waa"))
.stdout_capture()
.run()
.unwrap();
// The value of the exit code is preserved.
assert_eq!(1, output.status.code().unwrap());
assert_eq!("waa", String::from_utf8_lossy(&output.stdout).trim());
}
#[test]
fn test_unchecked_in_pipe() {
    // Pins down which side's exit status a pipe reports, for every placement
    // of unchecked() on the two sides.
    let zero = cmd!(path_to_exe("status"), "0");
    let one = cmd!(path_to_exe("status"), "1");
    let two = cmd!(path_to_exe("status"), "2");
    // Right takes precedence over left.
    let output = one.pipe(two.clone()).unchecked().run().unwrap();
    assert_eq!(2, output.status.code().unwrap());
    // Except that checked on the left takes precedence over unchecked on
    // the right.
    let output = one.pipe(two.unchecked()).unchecked().run().unwrap();
    assert_eq!(1, output.status.code().unwrap());
    // Right takes precedence over the left again if they're both unchecked.
    let output = one
        .unchecked()
        .pipe(two.unchecked())
        .unchecked()
        .run()
        .unwrap();
    assert_eq!(2, output.status.code().unwrap());
    // Except that if the right is a success, the left takes precedence.
    let output = one
        .unchecked()
        .pipe(zero.unchecked())
        .unchecked()
        .run()
        .unwrap();
    assert_eq!(1, output.status.code().unwrap());
    // Even if the right is checked.
    let output = one.unchecked().pipe(zero).unchecked().run().unwrap();
    assert_eq!(1, output.status.code().unwrap());
}
#[test]
fn test_pipe() {
let output = sh("echo xxx")
.pipe(cmd!(path_to_exe("x_to_y")))
.read()
.unwrap();
assert_eq!("yyy", output);
// Check that errors on either side are propagated.
let result = true_cmd().pipe(false_cmd()).run();
assert!(result.is_err());
let result = false_cmd().pipe(true_cmd()).run();
assert!(result.is_err());
}
#[test]
fn test_pipe_with_kill()
|
#[test]
fn test_pipe_start() {
let nonexistent_cmd = cmd!(path_to_exe("nonexistent!!!"));
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Errors starting the left side of a pipe are returned immediately, and
// the right side is never started.
nonexistent_cmd.pipe(&sleep_cmd).start().unwrap_err();
// Errors starting the right side are also returned immediately, and the
// the left side is killed first.
sleep_cmd.pipe(nonexistent_cmd).start().unwrap_err();
}
#[test]
fn test_multiple_threads() {
// Wait on the sleep command in a background thread, while the main thread
// kills it.
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
let handle = Arc::new(sleep_cmd.unchecked().start().unwrap());
let arc_clone = handle.clone();
let wait_thread = std::thread::spawn(move || {
arc_clone.wait().unwrap();
});
handle.kill().unwrap();
wait_thread.join().unwrap();
}
#[test]
fn test_nonblocking_waits() {
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Make sure pipelines handle try_wait correctly.
let handle = sleep_cmd.pipe(&sleep_cmd).unchecked().start().unwrap();
// Make sure try_wait doesn't block on it.
assert!(handle.try_wait().unwrap().is_none());
handle.kill().unwrap();
}
#[test]
fn test_input() {
let expr = cmd!(path_to_exe("x_to_y")).stdin_bytes("xxx");
let output = expr.read().unwrap();
assert_eq!("yyy", output);
}
#[test]
fn test_stderr() {
let (mut reader, writer) = ::os_pipe::pipe().unwrap();
sh("echo hi>&2").stderr_file(writer).run().unwrap();
let mut s = String::new();
reader.read_to_string(&mut s).unwrap();
assert_eq!(s.trim(), "hi");
}
#[test]
fn test_null() {
let expr = cmd!(path_to_exe("cat"))
.stdin_null()
.stdout_null()
.stderr_null();
let output = expr.read().unwrap();
assert_eq!("", output);
}
#[test]
fn test_path() {
let dir = tempfile::tempdir().unwrap();
let input_file = dir.path().join("input_file");
let output_file = dir.path().join("output_file");
File::create(&input_file)
.unwrap()
.write_all(b"xxx")
.unwrap();
let expr = cmd!(path_to_exe("x_to_y"))
.stdin_path(&input_file)
.stdout_path(&output_file);
let output = expr.read().unwrap();
assert_eq!("", output);
let mut file_output = String::new();
File::open(&output_file)
.unwrap()
.read_to_string(&mut file_output)
.unwrap();
assert_eq!("yyy", file_output);
}
#[test]
fn test_swapping() {
let output = sh("echo hi")
.stdout_to_stderr()
.stderr_capture()
.run()
.unwrap();
let stderr = str::from_utf8(&output.stderr).unwrap().trim();
assert_eq!("hi", stderr);
// Windows compatible. (Requires no space before the ">".)
let output = sh("echo hi>&2").stderr_to_stdout().read().unwrap();
assert_eq!("hi", output);
}
#[test]
fn test_file() {
let dir = tempfile::tempdir().unwrap();
let file = dir.path().join("file");
File::create(&file).unwrap().write_all(b"example").unwrap();
let expr = cmd!(path_to_exe("cat")).stdin_file(File::open(&file).unwrap());
let output = expr.read().unwrap();
assert_eq!(output, "example");
}
#[test]
fn test_ergonomics() {
let mystr = "owned string".to_owned();
let mypathbuf = Path::new("a/b/c").to_owned();
let myvec = vec![1, 2, 3];
// These are nonsense expressions. We just want to make sure they compile.
let _ = sh("true")
.stdin_path(&*mystr)
.stdin_bytes(&*myvec)
.stdout_path(&*mypathbuf);
let _ = sh("true")
.stdin_path(mystr)
.stdin_bytes(myvec)
.stdout_path(mypathbuf);
// Unfortunately, this one doesn't work with our Into<Vec<u8>> bound on input().
// TODO: Is it worth having these impls for &Vec in other cases?
// let _ = sh("true").stdin_path(&mystr).stdin_bytes(&myvec).stdout_path(&mypathbuf);
}
#[test]
fn test_capture_both() {
// Windows compatible, no space before ">", and we trim newlines at the end to avoid
// dealing with the different kinds.
let output = sh("echo hi && echo lo>&2")
.stdout_capture()
.stderr_capture()
.run()
.unwrap();
assert_eq!("hi", str::from_utf8(&output.stdout).unwrap().trim());
assert_eq!("lo", str::from_utf8(&output.stderr).unwrap().trim());
}
#[test]
fn test_dir() {
// This test checks the interaction of `dir` and relative exe paths.
// Make sure that's actually what we're testing.
let pwd_path = path_to_exe("pwd");
assert!(pwd_path.is_relative());
let pwd = cmd!(pwd_path);
// First assert that ordinary commands happen in the parent's dir.
let pwd_output = pwd.read().unwrap();
let pwd_path = Path::new(&pwd_output);
assert_eq!(pwd_path, env::current_dir().unwrap());
// Now create a temp dir and make sure we can set dir to it. This
// also tests the interaction of `dir` and relative exe paths.
let dir = tempfile::tempdir().unwrap();
let pwd_output = pwd.dir(dir.path()).read().unwrap();
let pwd_path = Path::new(&pwd_output);
// pwd_path isn't totally canonical on Windows, because it
// doesn't have a prefix. Thus we have to canonicalize both
// sides. (This also handles symlinks in TMP_DIR.)
assert_eq!(
pwd_path.canonicalize().unwrap(),
dir.path().canonicalize().unwrap()
);
}
#[test]
fn test_env() {
let output = cmd!(path_to_exe("print_env"), "foo")
.env("foo", "bar")
.read()
.unwrap();
assert_eq!("bar", output);
}
#[test]
fn test_full_env() {
// Note that it's important that no other tests use this variable name,
// because the test runner is multithreaded.
let var_name = "TEST_FULL_ENV";
// Capture the parent env, and make sure it does *not* contain our variable.
let clean_env: HashMap<String, String> = env::vars().collect();
assert!(
!clean_env.contains_key(var_name),
"why is this variable set?"
);
// Run a child process with that map passed to full_env(). It should be guaranteed not to
// see our variable, regardless of any outer env() calls or changes in the parent.
let clean_child = cmd!(path_to_exe("print_env"), var_name).full_env(clean_env);
// Dirty the parent env. Should be suppressed.
env::set_var(var_name, "junk1");
// And make an outer env() call. Should also be suppressed.
let dirty_child = clean_child.env(var_name, "junk2");
// Check that neither of those have any effect.
let output = dirty_child.read().unwrap();
assert_eq!("", output);
}
#[test]
fn test_env_remove() {
// Set an environment variable in the parent. Note that it's important that
// no other tests use this variable name, because the test runner is
// multithreaded.
let var_name = "TEST_ENV_REMOVE";
env::set_var(var_name, "junk2");
// Run a command that observes the variable.
let output1 = cmd!(path_to_exe("print_env"), var_name).read().unwrap();
assert_eq!("junk2", output1);
// Run the same command with that variable removed.
let output2 = cmd!(path_to_exe("print_env"), var_name)
.env_remove(var_name)
.read()
.unwrap();
assert_eq!("", output2);
}
#[test]
fn test_env_remove_case_sensitivity() {
// Env var deletion is particularly sensitive to the differences in
// case-sensitivity between Unix and Windows. The semantics of env_remove
// in duct must *match the platform*.
// Set an environment variable in the parent. Note that it's important that
// no other tests use this variable name, because the test runner is
// multithreaded.
let var_name = "TEST_ENV_REMOVE_CASE_SENSITIVITY";
env::set_var(var_name, "abc123");
// Run a command that tries to clear the same variable, but in lowercase.
let output1 = cmd!(path_to_exe("print_env"), var_name)
.env_remove(var_name.to_lowercase())
.read()
.unwrap();
// Now try to clear that variable from the parent environment, again using
// lowercase, and run the same command without `env_remove`.
env::remove_var(var_name.to_lowercase());
let output2 = cmd!(path_to_exe("print_env"), var_name).read().unwrap();
// On Unix, env vars are case sensitive, and we don't expect either removal
// to have any effect. On Windows, they're insensitive, and we expect both
// removals to work. The key thing is that both approaches to removal have
// the *same effect*.
assert_eq!(output1, output2, "failed to match platform behavior!!!");
// Go ahead and assert the exact expected output, just in case. If these
// assertions ever break, it might be this test's fault and not the code's.
if cfg!(windows) {
assert_eq!(output1, "");
} else {
assert_eq!(output1, "abc123");
}
}
#[test]
fn test_broken_pipe() {
// If the input writing thread fills up its pipe buffer, writing will block. If the process
// on the other end of the pipe exits while writer is waiting, the write will return an
// error. We need to swallow that error, rather than returning it.
let myvec = vec![0; 1_000_000];
true_cmd().stdin_bytes(myvec).run().unwrap();
}
#[test]
fn test_silly() {
// A silly test, purely for coverage.
crate::IoValue::Null.try_clone().unwrap();
}
#[test]
fn test_path_sanitization() {
// We don't do any chdir'ing in this process, because the tests runner is multithreaded,
// and we don't want to screw up anyone else's relative paths. Instead, we shell out to a
// small test process that does that for us.
cmd!(path_to_exe("exe_in_dir"), path_to_exe("status"), "0")
.run()
.unwrap();
}
#[test]
fn test_before_spawn_hook() {
let (reader, mut writer) = os_pipe::pipe().unwrap();
let expr = cmd!(path_to_exe("cat")).before_spawn(move |cmd| {
let reader_clone = reader.try_clone()?;
cmd.stdin(reader_clone);
Ok(())
});
writer.write_all(b"foobar").unwrap();
drop(writer);
let output = expr.read().unwrap();
assert_eq!("foobar", output);
}
#[test]
fn test_trailing_comma() {
let output = cmd!(path_to_exe("echo"), "trailing",).read().unwrap();
assert_eq!("trailing", output);
}
#[test]
fn test_no_argument() {
let output = cmd!(path_to_exe("echo")).read().unwrap();
assert_eq!("", output);
}
#[test]
fn test_dropping_reader() {
// Use an explicit stderr pipe to test the ReaderHandle's drop behavior.
let (mut stderr_reader, stderr_writer) = os_pipe::pipe().unwrap();
let mut reader_handle = cmd!(path_to_exe("sleep"), "1000000")
.stdout_file(stderr_writer)
.reader()
.unwrap();
// A zero-length read doesn't block.
let n = reader_handle.read(&mut []).unwrap();
assert_eq!(n, 0);
// Try-wait returns None.
let output = reader_handle.try_wait().unwrap();
assert!(output.is_none());
// Now we drop the reader. This kills the child.
drop(reader_handle);
// Now that the child is killed, reading the stderr pipe will not block.
// (Note that our copy was closed when the temporary Expression above
// dropped.)
let mut stderr = Vec::new();
let n = stderr_reader.read_to_end(&mut stderr).unwrap();
assert_eq!(n, 0);
}
#[test]
fn test_kill_with_grandchild() -> io::Result<()> {
// We're going to start a child process, and that child is going to start a
// grandchild. The grandchild is going to sleep forever (1 day). We'll read
// some output from the child to make sure it's done starting the
// grandchild, and then we'll kill the child. Now, the grandchild will not
// be killed, and it will still hold a write handle to the stdout pipe. So
// this tests that the wait done by kill only waits on the child to exit,
// and does not wait on IO to finish.
//
// This test leaks the grandchild process. I'm sorry.
// Capturing stderr means an IO thread is spawned, even though we're using
// a ReaderHandle to read stdout. What we're testing here is that kill()
// doesn't wait on that IO thread.
let mut reader = cmd!(path_to_exe("child_grandchild"))
.stderr_capture()
.reader()?;
// Read "started" from the child to make sure we don't kill it before it
// starts the grandchild.
let mut started_read = [0; 7];
reader.read_exact(&mut started_read)?;
assert_eq!(&started_read, b"started");
// Ok, this had better not block!
reader.kill()
}
#[test]
fn test_debug_format() {
    // The Debug impl prints the expression tree, not the process state.
    let pipeline = cmd!("foo", "bar", "baz").pipe(cmd!("bing", "bong"));
    let formatted = format!("{:?}", pipeline);
    assert_eq!(
        formatted,
        r#"Pipe(Cmd(["foo", "bar", "baz"]), Cmd(["bing", "bong"]))"#,
    );
}
#[test]
fn test_reader_try_wait() -> io::Result<()> {
// Create a ReaderHandle for a cat process. Give cat 1 MB of data to echo
// back to us, so that it will block on its stdout pipe until we start
// reading.
let bytes = vec![42; 1_000_000];
let mut cat_reader = cmd!(path_to_exe("cat"))
.stdin_bytes(bytes.clone())
.reader()?;
assert!(cat_reader.try_wait()?.is_none());
let mut output = Vec::new();
cat_reader.read_to_end(&mut output)?;
assert_eq!(output, bytes);
let output = cat_reader.try_wait()?.expect("is some");
assert!(output.status.success());
assert!(output.stdout.is_empty());
assert!(output.stderr.is_empty());
Ok(())
}
#[test]
fn test_pids() -> io::Result<()> {
let handle = true_cmd().start()?;
let pids = handle.pids();
assert_eq!(pids.len(), 1);
handle.wait()?;
let reader = true_cmd().reader()?;
let pids = reader.pids();
assert_eq!(pids.len(), 1);
std::io::copy(&mut &reader, &mut std::io::sink())?;
let handle = true_cmd()
.pipe(true_cmd().stdout_null().pipe(true_cmd()))
.start()?;
let pids = handle.pids();
assert_eq!(pids.len(), 3);
handle.wait()?;
let reader = true_cmd()
.pipe(true_cmd().stdout_null().pipe(true_cmd()))
.reader()?;
let pids = reader.pids();
assert_eq!(pids.len(), 3);
std::io::copy(&mut &reader, &mut std::io::sink())?;
Ok(())
}
|
{
// Make sure both sides get killed.
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Note that we don't use unchecked() here. This tests that kill suppresses
// exit status errors.
let handle = sleep_cmd.pipe(sleep_cmd.clone()).start().unwrap();
handle.kill().unwrap();
// But calling wait again should be an error, because of the status.
handle.wait().unwrap_err();
}
|
identifier_body
|
lib.rs
|
//! Pretty printing for Javascript values from [wasm-bindgen](https://docs.rs/wasm-bindgen).
#![forbid(unsafe_code)]
use js_sys::{
Array, Date, Error, Function, JsString, Map, Object, Promise, Reflect, RegExp, Set, Symbol,
WeakSet,
};
use std::{
collections::{BTreeMap, BTreeSet, HashSet},
fmt::{Debug, Display, Formatter, Result as FmtResult},
rc::Rc,
};
use wasm_bindgen::{JsCast, JsValue};
use web_sys::{Document, Element, Window};
/// Types that can produce a [`Prettified`] debug view of themselves.
pub trait Pretty {
    /// Wrap this value for pretty printing.
    fn pretty(&self) -> Prettified;
}
// Blanket impl: anything that can be viewed as a JsValue is Pretty.
impl<T> Pretty for T
where
    T: AsRef<JsValue>,
{
    fn pretty(&self) -> Prettified {
        // Start with an empty cycle-tracking set and no skipped properties.
        Prettified {
            value: self.as_ref().to_owned(),
            seen: WeakSet::new(),
            skip: Default::default(),
        }
    }
}
/// A pretty-printable value from Javascript.
pub struct Prettified {
    /// The current value we're visiting.
    value: JsValue,
    /// Objects already on the current visit path, used to break cycles.
    /// We just use a JS WeakSet here to avoid relying on wasm-bindgen's
    /// unstable ABI.
    seen: WeakSet,
    /// Properties we don't want serialized.
    skip: Rc<HashSet<String>>,
}
impl Prettified {
/// Skip printing the property with `name` if it exists on any object
/// visited (transitively).
pub fn skip_property(&mut self, name: &str) -> &mut Self {
let mut with_name = HashSet::to_owned(&self.skip);
with_name.insert(name.to_owned());
self.skip = Rc::new(with_name);
self
}
fn child(&self, v: &JsValue) -> Self {
Self { seen: self.seen.clone(), skip: self.skip.clone(), value: v.as_ref().clone() }
}
// TODO get a serde_json::Value from this too
}
impl Debug for Prettified {
    /// Render the wrapped JS value, dispatching on its runtime type.
    fn fmt(&self, f: &mut Formatter) -> FmtResult {
        // detect and break cycles before trying to figure out Object subclass
        // keeps a single path here rather than separately in each branch below
        let mut _reset = None;
        if let Some(obj) = self.value.dyn_ref::<Object>() {
            if self.seen.has(obj) {
                return write!(f, "[Cycle]");
            }
            self.seen.add(obj);
            // Remove the object from the visited set when this frame returns,
            // so repeated siblings are not mistaken for cycles.
            _reset = Some(scopeguard::guard(obj.to_owned(), |obj| {
                self.seen.delete(&obj);
            }));
        }
        if self.value.is_null() {
            write!(f, "null")
        } else if self.value.is_undefined() {
            write!(f, "undefined")
        } else if self.value.dyn_ref::<Function>().is_some() {
            JsFunction.fmt(f)
        } else if self.value.dyn_ref::<Promise>().is_some() {
            write!(f, "[Promise]")
        } else if self.value.dyn_ref::<Document>().is_some() {
            write!(f, "[Document]")
        } else if self.value.dyn_ref::<Window>().is_some() {
            write!(f, "[Window]")
        } else if let Some(s) = self.value.dyn_ref::<JsString>() {
            write!(f, "{:?}", s.as_string().unwrap())
        } else if let Some(n) = self.value.as_f64() {
            write!(f, "{}", n)
        } else if let Some(b) = self.value.as_bool() {
            write!(f, "{:?}", b)
        } else if let Some(d) = self.value.dyn_ref::<Date>() {
            write!(f, "{}", d.to_iso_string().as_string().unwrap())
        } else if let Some(d) = self.value.dyn_ref::<Element>() {
            // Render DOM elements as a short self-closing tag, e.g. `<div #id.class/>`.
            let name = d.tag_name().to_ascii_lowercase();
            let (mut class, mut id) = (d.class_name(), d.id());
            if !class.is_empty() {
                class.insert_str(0, ".");
            }
            if !id.is_empty() {
                id.insert_str(0, " #");
            }
            write!(f, "<{}{}{}/>", name, id, class)
        } else if let Some(e) = self.value.dyn_ref::<Error>() {
            write!(f, "Error: {}", e.to_string().as_string().unwrap())
        } else if let Some(r) = self.value.dyn_ref::<RegExp>() {
            write!(f, "/{}/", r.to_string().as_string().unwrap())
        } else if let Some(s) = self.value.dyn_ref::<Symbol>() {
            write!(f, "{}", s.to_string().as_string().unwrap())
        } else if let Some(a) = self.value.dyn_ref::<Array>() {
            let mut f = f.debug_list();
            for val in a.iter() {
                f.entry(&self.child(&val));
            }
            f.finish()
        } else if let Some(s) = self.value.dyn_ref::<Set>() {
            let mut f = f.debug_set();
            let entries = s.entries();
            // The JS iterator ends either by reporting `done` or by erroring.
            while let Ok(next) = entries.next() {
                if next.done() {
                    break;
                }
                f.entry(&self.child(&next.value()));
            }
            f.finish()
        } else if let Some(m) = self.value.dyn_ref::<Map>() {
            let mut f = f.debug_map();
            let keys = m.keys();
            while let Ok(next) = keys.next() {
                if next.done() {
                    break;
                }
                let key = next.value();
                let value = m.get(&key);
                f.entry(&self.child(&key), &self.child(&value));
            }
            f.finish()
        } else if let Some(obj) = self.value.dyn_ref::<Object>() {
            // Fallback for other objects: walk up the prototype chain, printing
            // data properties first, then function-valued ones, per level.
            let mut proto = obj.clone();
            let mut props_seen = HashSet::new();
            let name = obj.constructor().name().as_string().unwrap();
            let mut f = f.debug_struct(&name);
            loop {
                let mut functions = BTreeSet::new();
                let mut props = BTreeMap::new();
                for raw_key in Object::get_own_property_names(&proto).iter() {
                    let key = raw_key.as_string().expect("object keys are always strings");
                    // Skip dunder names, properties already printed from a more
                    // derived level, and names the caller asked to skip.
                    if (key.starts_with("__") && key.ends_with("__"))
                        || props_seen.contains(&key)
                        || functions.contains(&key)
                        || self.skip.contains(&key)
                    {
                        continue;
                    }
                    if let Ok(value) = Reflect::get(obj, &raw_key) {
                        props_seen.insert(key.clone());
                        if value.is_function() {
                            functions.insert(key);
                        } else {
                            props.insert(key, self.child(&value));
                        }
                    }
                }
                for (key, value) in props {
                    f.field(&key, &value);
                }
                for key in functions {
                    f.field(&key, &JsFunction);
                }
                proto = Object::get_prototype_of(proto.as_ref());
                if proto.is_falsy() || proto.constructor().name().as_string().unwrap() == "Object" {
                    // we've reached the end of the prototype chain
                    break;
                }
            }
            f.finish()
        } else {
            write!(f, "unknown ({:?})", &self.value)
        }
    }
}
impl Display for Prettified {
    /// Display delegates to the alternate ("{:#?}") Debug rendering.
    fn fmt(&self, f: &mut Formatter) -> FmtResult {
        write!(f, "{:#?}", self)
    }
}
/// Marker type for Javascript function values; rendered as `[Function]`.
/// (The struct's identifier was lost to extraction garbling; it is
/// reconstructed from the `impl Debug for JsFunction` below and its uses
/// inside `Prettified`'s Debug impl.)
struct JsFunction;
impl Debug for JsFunction {
    /// Functions are opaque to the printer; emit a fixed marker.
    fn fmt(&self, f: &mut Formatter) -> FmtResult {
        f.write_str("[Function]")
    }
}
#[cfg(test)]
mod tests {
use super::*;
use futures::channel::oneshot::channel;
use wasm_bindgen::closure::Closure;
use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure};
use web_sys::{Event, EventTarget};
wasm_bindgen_test_configure!(run_in_browser);
#[wasm_bindgen_test]
fn cycle_is_broken() {
let with_cycles = js_sys::Function::new_no_args(
r#"
let root = { child: { nested: [] } };
root.child.nested.push(root);
return root;
"#,
)
.call0(&JsValue::null())
.unwrap();
assert_eq!(
with_cycles.pretty().to_string(),
r#"Object {
child: Object {
nested: [
[Cycle],
],
},
}"#
);
}
#[wasm_bindgen_test]
fn repeated_siblings_are_not_cycles() {
let with_siblings = js_sys::Function::new_no_args(
r#"
let root = { child: { nested: [] } };
let repeated_child = { foo: "bar" };
root.child.nested.push(repeated_child);
root.child.nested.push(repeated_child);
return root;
"#,
)
.call0(&JsValue::null())
.unwrap();
assert_eq!(
with_siblings.pretty().to_string(),
r#"Object {
child: Object {
nested: [
Object {
foo: "bar",
},
Object {
foo: "bar",
},
],
},
}"#
);
}
#[wasm_bindgen_test]
async fn live_keyboard_event() {
// create an input element and bind it to the document
let window = web_sys::window().unwrap();
let document = window.document().unwrap();
let input = document.create_element("input").unwrap();
// input.set_attribute("type", "text").unwrap();
document.body().unwrap().append_child(input.as_ref()).unwrap();
// create & add an event listener that will send the event back the test
let (send, recv) = channel();
let callback = Closure::once_into_js(move |ev: Event| {
send.send(ev).unwrap();
});
let target: &EventTarget = input.as_ref();
let event_type = "keydown";
target.add_event_listener_with_callback(event_type, callback.dyn_ref().unwrap()).unwrap();
// create & dispatch an event to the input element
let sent_event = web_sys::KeyboardEvent::new_with_keyboard_event_init_dict(
event_type,
web_sys::KeyboardEventInit::new()
.char_code(b'F' as u32)
.bubbles(true)
.cancelable(true)
.view(Some(&window)),
)
.unwrap();
let sent: &Event = sent_event.as_ref();
assert!(target.dispatch_event(sent).unwrap());
// wait for the event to come back
let received_event: Event = recv.await.unwrap();
// make sure we can print it without exploding due to nesting
assert_eq!(
received_event.pretty().skip_property("timeStamp").to_string(),
r#"KeyboardEvent {
isTrusted: false,
DOM_KEY_LOCATION_LEFT: 1,
DOM_KEY_LOCATION_NUMPAD: 3,
DOM_KEY_LOCATION_RIGHT: 2,
DOM_KEY_LOCATION_STANDARD: 0,
altKey: false,
charCode: 70,
code: "",
ctrlKey: false,
isComposing: false,
key: "",
keyCode: 0,
location: 0,
metaKey: false,
repeat: false,
shiftKey: false,
constructor: [Function],
getModifierState: [Function],
initKeyboardEvent: [Function],
detail: 0,
sourceCapabilities: null,
view: [Window],
which: 0,
initUIEvent: [Function],
AT_TARGET: 2,
BUBBLING_PHASE: 3,
CAPTURING_PHASE: 1,
NONE: 0,
bubbles: true,
cancelBubble: false,
cancelable: true,
composed: false,
currentTarget: null,
defaultPrevented: false,
eventPhase: 0,
path: [
<input/>,
<body/>,
<html/>,
[Document],
[Window],
],
returnValue: true,
srcElement: <input/>,
target: <input/>,
type: "keydown",
composedPath: [Function],
initEvent: [Function],
preventDefault: [Function],
stopImmediatePropagation: [Function],
stopPropagation: [Function],
}"#,
);
}
}
|
JsFunction
|
identifier_name
|
lib.rs
|
//! Pretty printing for Javascript values from [wasm-bindgen](https://docs.rs/wasm-bindgen).
#![forbid(unsafe_code)]
use js_sys::{
Array, Date, Error, Function, JsString, Map, Object, Promise, Reflect, RegExp, Set, Symbol,
WeakSet,
};
use std::{
collections::{BTreeMap, BTreeSet, HashSet},
fmt::{Debug, Display, Formatter, Result as FmtResult},
rc::Rc,
};
use wasm_bindgen::{JsCast, JsValue};
use web_sys::{Document, Element, Window};
pub trait Pretty {
fn pretty(&self) -> Prettified;
}
|
where
T: AsRef<JsValue>,
{
fn pretty(&self) -> Prettified {
Prettified {
value: self.as_ref().to_owned(),
seen: WeakSet::new(),
skip: Default::default(),
}
}
}
/// A pretty-printable value from Javascript.
pub struct Prettified {
/// The current value we're visiting.
value: JsValue,
/// We just use a JS array here to avoid relying on wasm-bindgen's unstable
/// ABI.
seen: WeakSet,
/// Properties we don't want serialized.
skip: Rc<HashSet<String>>,
}
impl Prettified {
/// Skip printing the property with `name` if it exists on any object
/// visited (transitively).
pub fn skip_property(&mut self, name: &str) -> &mut Self {
let mut with_name = HashSet::to_owned(&self.skip);
with_name.insert(name.to_owned());
self.skip = Rc::new(with_name);
self
}
fn child(&self, v: &JsValue) -> Self {
Self { seen: self.seen.clone(), skip: self.skip.clone(), value: v.as_ref().clone() }
}
// TODO get a serde_json::Value from this too
}
impl Debug for Prettified {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
// detect and break cycles before trying to figure out Object subclass
// keeps a single path here rather than separately in each branch below
let mut _reset = None;
if let Some(obj) = self.value.dyn_ref::<Object>() {
if self.seen.has(obj) {
return write!(f, "[Cycle]");
}
self.seen.add(obj);
_reset = Some(scopeguard::guard(obj.to_owned(), |obj| {
self.seen.delete(&obj);
}));
}
if self.value.is_null() {
write!(f, "null")
} else if self.value.is_undefined() {
write!(f, "undefined")
} else if self.value.dyn_ref::<Function>().is_some() {
JsFunction.fmt(f)
} else if self.value.dyn_ref::<Promise>().is_some() {
write!(f, "[Promise]")
} else if self.value.dyn_ref::<Document>().is_some() {
write!(f, "[Document]")
} else if self.value.dyn_ref::<Window>().is_some() {
write!(f, "[Window]")
} else if let Some(s) = self.value.dyn_ref::<JsString>() {
write!(f, "{:?}", s.as_string().unwrap())
} else if let Some(n) = self.value.as_f64() {
write!(f, "{}", n)
} else if let Some(b) = self.value.as_bool() {
write!(f, "{:?}", b)
} else if let Some(d) = self.value.dyn_ref::<Date>() {
write!(f, "{}", d.to_iso_string().as_string().unwrap())
} else if let Some(d) = self.value.dyn_ref::<Element>() {
let name = d.tag_name().to_ascii_lowercase();
let (mut class, mut id) = (d.class_name(), d.id());
if!class.is_empty() {
class.insert_str(0, ".");
}
if!id.is_empty() {
id.insert_str(0, " #");
}
write!(f, "<{}{}{}/>", name, id, class)
} else if let Some(e) = self.value.dyn_ref::<Error>() {
write!(f, "Error: {}", e.to_string().as_string().unwrap())
} else if let Some(r) = self.value.dyn_ref::<RegExp>() {
write!(f, "/{}/", r.to_string().as_string().unwrap())
} else if let Some(s) = self.value.dyn_ref::<Symbol>() {
write!(f, "{}", s.to_string().as_string().unwrap())
} else if let Some(a) = self.value.dyn_ref::<Array>() {
let mut f = f.debug_list();
for val in a.iter() {
f.entry(&self.child(&val));
}
f.finish()
} else if let Some(s) = self.value.dyn_ref::<Set>() {
let mut f = f.debug_set();
let entries = s.entries();
while let Ok(next) = entries.next() {
if next.done() {
break;
}
f.entry(&self.child(&next.value()));
}
f.finish()
} else if let Some(m) = self.value.dyn_ref::<Map>() {
let mut f = f.debug_map();
let keys = m.keys();
while let Ok(next) = keys.next() {
if next.done() {
break;
}
let key = next.value();
let value = m.get(&key);
f.entry(&self.child(&key), &self.child(&value));
}
f.finish()
} else if let Some(obj) = self.value.dyn_ref::<Object>() {
let mut proto = obj.clone();
let mut props_seen = HashSet::new();
let name = obj.constructor().name().as_string().unwrap();
let mut f = f.debug_struct(&name);
loop {
let mut functions = BTreeSet::new();
let mut props = BTreeMap::new();
for raw_key in Object::get_own_property_names(&proto).iter() {
let key = raw_key.as_string().expect("object keys are always strings");
if (key.starts_with("__") && key.ends_with("__"))
|| props_seen.contains(&key)
|| functions.contains(&key)
|| self.skip.contains(&key)
{
continue;
}
if let Ok(value) = Reflect::get(obj, &raw_key) {
props_seen.insert(key.clone());
if value.is_function() {
functions.insert(key);
} else {
props.insert(key, self.child(&value));
}
}
}
for (key, value) in props {
f.field(&key, &value);
}
for key in functions {
f.field(&key, &JsFunction);
}
proto = Object::get_prototype_of(proto.as_ref());
if proto.is_falsy() || proto.constructor().name().as_string().unwrap() == "Object" {
// we've reached the end of the prototype chain
break;
}
}
f.finish()
} else {
write!(f, "unknown ({:?})", &self.value)
}
}
}
impl Display for Prettified {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
write!(f, "{:#?}", self)
}
}
struct JsFunction;
impl Debug for JsFunction {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
write!(f, "[Function]")
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures::channel::oneshot::channel;
use wasm_bindgen::closure::Closure;
use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure};
use web_sys::{Event, EventTarget};
wasm_bindgen_test_configure!(run_in_browser);
#[wasm_bindgen_test]
fn cycle_is_broken() {
let with_cycles = js_sys::Function::new_no_args(
r#"
let root = { child: { nested: [] } };
root.child.nested.push(root);
return root;
"#,
)
.call0(&JsValue::null())
.unwrap();
assert_eq!(
with_cycles.pretty().to_string(),
r#"Object {
child: Object {
nested: [
[Cycle],
],
},
}"#
);
}
#[wasm_bindgen_test]
fn repeated_siblings_are_not_cycles() {
let with_siblings = js_sys::Function::new_no_args(
r#"
let root = { child: { nested: [] } };
let repeated_child = { foo: "bar" };
root.child.nested.push(repeated_child);
root.child.nested.push(repeated_child);
return root;
"#,
)
.call0(&JsValue::null())
.unwrap();
assert_eq!(
with_siblings.pretty().to_string(),
r#"Object {
child: Object {
nested: [
Object {
foo: "bar",
},
Object {
foo: "bar",
},
],
},
}"#
);
}
#[wasm_bindgen_test]
async fn live_keyboard_event() {
// create an input element and bind it to the document
let window = web_sys::window().unwrap();
let document = window.document().unwrap();
let input = document.create_element("input").unwrap();
// input.set_attribute("type", "text").unwrap();
document.body().unwrap().append_child(input.as_ref()).unwrap();
// create & add an event listener that will send the event back the test
let (send, recv) = channel();
let callback = Closure::once_into_js(move |ev: Event| {
send.send(ev).unwrap();
});
let target: &EventTarget = input.as_ref();
let event_type = "keydown";
target.add_event_listener_with_callback(event_type, callback.dyn_ref().unwrap()).unwrap();
// create & dispatch an event to the input element
let sent_event = web_sys::KeyboardEvent::new_with_keyboard_event_init_dict(
event_type,
web_sys::KeyboardEventInit::new()
.char_code(b'F' as u32)
.bubbles(true)
.cancelable(true)
.view(Some(&window)),
)
.unwrap();
let sent: &Event = sent_event.as_ref();
assert!(target.dispatch_event(sent).unwrap());
// wait for the event to come back
let received_event: Event = recv.await.unwrap();
// make sure we can print it without exploding due to nesting
assert_eq!(
received_event.pretty().skip_property("timeStamp").to_string(),
r#"KeyboardEvent {
isTrusted: false,
DOM_KEY_LOCATION_LEFT: 1,
DOM_KEY_LOCATION_NUMPAD: 3,
DOM_KEY_LOCATION_RIGHT: 2,
DOM_KEY_LOCATION_STANDARD: 0,
altKey: false,
charCode: 70,
code: "",
ctrlKey: false,
isComposing: false,
key: "",
keyCode: 0,
location: 0,
metaKey: false,
repeat: false,
shiftKey: false,
constructor: [Function],
getModifierState: [Function],
initKeyboardEvent: [Function],
detail: 0,
sourceCapabilities: null,
view: [Window],
which: 0,
initUIEvent: [Function],
AT_TARGET: 2,
BUBBLING_PHASE: 3,
CAPTURING_PHASE: 1,
NONE: 0,
bubbles: true,
cancelBubble: false,
cancelable: true,
composed: false,
currentTarget: null,
defaultPrevented: false,
eventPhase: 0,
path: [
<input/>,
<body/>,
<html/>,
[Document],
[Window],
],
returnValue: true,
srcElement: <input/>,
target: <input/>,
type: "keydown",
composedPath: [Function],
initEvent: [Function],
preventDefault: [Function],
stopImmediatePropagation: [Function],
stopPropagation: [Function],
}"#,
);
}
}
|
impl<T> Pretty for T
|
random_line_split
|
lib.rs
|
//! Pretty printing for Javascript values from [wasm-bindgen](https://docs.rs/wasm-bindgen).
#![forbid(unsafe_code)]
use js_sys::{
Array, Date, Error, Function, JsString, Map, Object, Promise, Reflect, RegExp, Set, Symbol,
WeakSet,
};
use std::{
collections::{BTreeMap, BTreeSet, HashSet},
fmt::{Debug, Display, Formatter, Result as FmtResult},
rc::Rc,
};
use wasm_bindgen::{JsCast, JsValue};
use web_sys::{Document, Element, Window};
pub trait Pretty {
fn pretty(&self) -> Prettified;
}
impl<T> Pretty for T
where
T: AsRef<JsValue>,
{
fn pretty(&self) -> Prettified {
Prettified {
value: self.as_ref().to_owned(),
seen: WeakSet::new(),
skip: Default::default(),
}
}
}
/// A pretty-printable value from Javascript.
pub struct Prettified {
/// The current value we're visiting.
value: JsValue,
/// We just use a JS array here to avoid relying on wasm-bindgen's unstable
/// ABI.
seen: WeakSet,
/// Properties we don't want serialized.
skip: Rc<HashSet<String>>,
}
impl Prettified {
/// Skip printing the property with `name` if it exists on any object
/// visited (transitively).
pub fn skip_property(&mut self, name: &str) -> &mut Self {
let mut with_name = HashSet::to_owned(&self.skip);
with_name.insert(name.to_owned());
self.skip = Rc::new(with_name);
self
}
fn child(&self, v: &JsValue) -> Self {
Self { seen: self.seen.clone(), skip: self.skip.clone(), value: v.as_ref().clone() }
}
// TODO get a serde_json::Value from this too
}
impl Debug for Prettified {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
// detect and break cycles before trying to figure out Object subclass
// keeps a single path here rather than separately in each branch below
let mut _reset = None;
if let Some(obj) = self.value.dyn_ref::<Object>() {
if self.seen.has(obj) {
return write!(f, "[Cycle]");
}
self.seen.add(obj);
_reset = Some(scopeguard::guard(obj.to_owned(), |obj| {
self.seen.delete(&obj);
}));
}
if self.value.is_null() {
write!(f, "null")
} else if self.value.is_undefined() {
write!(f, "undefined")
} else if self.value.dyn_ref::<Function>().is_some() {
JsFunction.fmt(f)
} else if self.value.dyn_ref::<Promise>().is_some() {
write!(f, "[Promise]")
} else if self.value.dyn_ref::<Document>().is_some() {
write!(f, "[Document]")
} else if self.value.dyn_ref::<Window>().is_some() {
write!(f, "[Window]")
} else if let Some(s) = self.value.dyn_ref::<JsString>() {
write!(f, "{:?}", s.as_string().unwrap())
} else if let Some(n) = self.value.as_f64() {
write!(f, "{}", n)
} else if let Some(b) = self.value.as_bool() {
write!(f, "{:?}", b)
} else if let Some(d) = self.value.dyn_ref::<Date>() {
write!(f, "{}", d.to_iso_string().as_string().unwrap())
} else if let Some(d) = self.value.dyn_ref::<Element>() {
let name = d.tag_name().to_ascii_lowercase();
let (mut class, mut id) = (d.class_name(), d.id());
if!class.is_empty() {
class.insert_str(0, ".");
}
if!id.is_empty() {
id.insert_str(0, " #");
}
write!(f, "<{}{}{}/>", name, id, class)
} else if let Some(e) = self.value.dyn_ref::<Error>() {
write!(f, "Error: {}", e.to_string().as_string().unwrap())
} else if let Some(r) = self.value.dyn_ref::<RegExp>() {
write!(f, "/{}/", r.to_string().as_string().unwrap())
} else if let Some(s) = self.value.dyn_ref::<Symbol>() {
write!(f, "{}", s.to_string().as_string().unwrap())
} else if let Some(a) = self.value.dyn_ref::<Array>() {
let mut f = f.debug_list();
for val in a.iter() {
f.entry(&self.child(&val));
}
f.finish()
} else if let Some(s) = self.value.dyn_ref::<Set>() {
let mut f = f.debug_set();
let entries = s.entries();
while let Ok(next) = entries.next() {
if next.done() {
break;
}
f.entry(&self.child(&next.value()));
}
f.finish()
} else if let Some(m) = self.value.dyn_ref::<Map>() {
let mut f = f.debug_map();
let keys = m.keys();
while let Ok(next) = keys.next() {
if next.done() {
break;
}
let key = next.value();
let value = m.get(&key);
f.entry(&self.child(&key), &self.child(&value));
}
f.finish()
} else if let Some(obj) = self.value.dyn_ref::<Object>() {
let mut proto = obj.clone();
let mut props_seen = HashSet::new();
let name = obj.constructor().name().as_string().unwrap();
let mut f = f.debug_struct(&name);
loop {
let mut functions = BTreeSet::new();
let mut props = BTreeMap::new();
for raw_key in Object::get_own_property_names(&proto).iter() {
let key = raw_key.as_string().expect("object keys are always strings");
if (key.starts_with("__") && key.ends_with("__"))
|| props_seen.contains(&key)
|| functions.contains(&key)
|| self.skip.contains(&key)
{
continue;
}
if let Ok(value) = Reflect::get(obj, &raw_key) {
props_seen.insert(key.clone());
if value.is_function() {
functions.insert(key);
} else {
props.insert(key, self.child(&value));
}
}
}
for (key, value) in props {
f.field(&key, &value);
}
for key in functions {
f.field(&key, &JsFunction);
}
proto = Object::get_prototype_of(proto.as_ref());
if proto.is_falsy() || proto.constructor().name().as_string().unwrap() == "Object" {
// we've reached the end of the prototype chain
break;
}
}
f.finish()
} else {
write!(f, "unknown ({:?})", &self.value)
}
}
}
impl Display for Prettified {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
write!(f, "{:#?}", self)
}
}
struct JsFunction;
impl Debug for JsFunction {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
write!(f, "[Function]")
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures::channel::oneshot::channel;
use wasm_bindgen::closure::Closure;
use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure};
use web_sys::{Event, EventTarget};
wasm_bindgen_test_configure!(run_in_browser);
#[wasm_bindgen_test]
fn cycle_is_broken()
|
);
}
#[wasm_bindgen_test]
fn repeated_siblings_are_not_cycles() {
let with_siblings = js_sys::Function::new_no_args(
r#"
let root = { child: { nested: [] } };
let repeated_child = { foo: "bar" };
root.child.nested.push(repeated_child);
root.child.nested.push(repeated_child);
return root;
"#,
)
.call0(&JsValue::null())
.unwrap();
assert_eq!(
with_siblings.pretty().to_string(),
r#"Object {
child: Object {
nested: [
Object {
foo: "bar",
},
Object {
foo: "bar",
},
],
},
}"#
);
}
#[wasm_bindgen_test]
async fn live_keyboard_event() {
// create an input element and bind it to the document
let window = web_sys::window().unwrap();
let document = window.document().unwrap();
let input = document.create_element("input").unwrap();
// input.set_attribute("type", "text").unwrap();
document.body().unwrap().append_child(input.as_ref()).unwrap();
// create & add an event listener that will send the event back the test
let (send, recv) = channel();
let callback = Closure::once_into_js(move |ev: Event| {
send.send(ev).unwrap();
});
let target: &EventTarget = input.as_ref();
let event_type = "keydown";
target.add_event_listener_with_callback(event_type, callback.dyn_ref().unwrap()).unwrap();
// create & dispatch an event to the input element
let sent_event = web_sys::KeyboardEvent::new_with_keyboard_event_init_dict(
event_type,
web_sys::KeyboardEventInit::new()
.char_code(b'F' as u32)
.bubbles(true)
.cancelable(true)
.view(Some(&window)),
)
.unwrap();
let sent: &Event = sent_event.as_ref();
assert!(target.dispatch_event(sent).unwrap());
// wait for the event to come back
let received_event: Event = recv.await.unwrap();
// make sure we can print it without exploding due to nesting
assert_eq!(
received_event.pretty().skip_property("timeStamp").to_string(),
r#"KeyboardEvent {
isTrusted: false,
DOM_KEY_LOCATION_LEFT: 1,
DOM_KEY_LOCATION_NUMPAD: 3,
DOM_KEY_LOCATION_RIGHT: 2,
DOM_KEY_LOCATION_STANDARD: 0,
altKey: false,
charCode: 70,
code: "",
ctrlKey: false,
isComposing: false,
key: "",
keyCode: 0,
location: 0,
metaKey: false,
repeat: false,
shiftKey: false,
constructor: [Function],
getModifierState: [Function],
initKeyboardEvent: [Function],
detail: 0,
sourceCapabilities: null,
view: [Window],
which: 0,
initUIEvent: [Function],
AT_TARGET: 2,
BUBBLING_PHASE: 3,
CAPTURING_PHASE: 1,
NONE: 0,
bubbles: true,
cancelBubble: false,
cancelable: true,
composed: false,
currentTarget: null,
defaultPrevented: false,
eventPhase: 0,
path: [
<input/>,
<body/>,
<html/>,
[Document],
[Window],
],
returnValue: true,
srcElement: <input/>,
target: <input/>,
type: "keydown",
composedPath: [Function],
initEvent: [Function],
preventDefault: [Function],
stopImmediatePropagation: [Function],
stopPropagation: [Function],
}"#,
);
}
}
|
{
let with_cycles = js_sys::Function::new_no_args(
r#"
let root = { child: { nested: [] } };
root.child.nested.push(root);
return root;
"#,
)
.call0(&JsValue::null())
.unwrap();
assert_eq!(
with_cycles.pretty().to_string(),
r#"Object {
child: Object {
nested: [
[Cycle],
],
},
}"#
|
identifier_body
|
renderer.rs
|
use std::f64;
use pcg_rand::Pcg32;
use rand::{Rng, SeedableRng};
use crate::hitpoint::Hitpoint;
use crate::math::{add, brdf, dot, elementwise_mul, intensity_to_color, min, mul, norm, random_from_brdf, sub};
use crate::ray::Ray;
use crate::renderershape::RendererShape;
use crate::rendereroutputpixel::RendererOutputPixel;
use crate::rendererscene::RendererScene;
use crate::renderertype::RendererType;
use crate::NUMBER_OF_BINS;
//static MODULO: [usize; 5] = [0, 1, 2, 0, 1];
pub struct Renderer {
width: u32,
height: u32,
spp_per_iteration: u32,
maximum_spp: u32,
maximum_error: f64,
maximum_brdf_value: f64,
perform_post_process: bool,
scene: RendererScene,
renderer_output_pixels: Vec<RendererOutputPixel>
}
impl Renderer {
pub fn new(width: u32, height: u32, spp_per_iteration: u32, maximum_spp: u32, maximum_error: f64, maximum_brdf_value: f64, perform_post_process: bool, scene: RendererScene) -> Self {
let renderer_output_pixels: Vec<RendererOutputPixel> = Vec::new();
Self {
width,
height,
spp_per_iteration,
maximum_spp,
maximum_error,
maximum_brdf_value,
perform_post_process,
scene,
renderer_output_pixels,
}
}
pub fn get_renderer_output_pixels(&self) -> Vec<RendererOutputPixel> {
self.renderer_output_pixels.to_vec()
}
pub fn render(&mut self, y: u32, x: u32) {
if x == self.width/2 {
println!("Rendering row {} of {}.", y, self.height);
}
let number_of_light_spheres = self.scene.light_spheres.len();
let random_exclusive_max_lights: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_light_spheres as u32);
let number_of_cameras = self.scene.cameras.len();
self.renderer_output_pixels.push(RendererOutputPixel::new(y, x));
let last_pos = self.renderer_output_pixels.len()-1;
let mut pcg: Pcg32 = Pcg32::from_entropy();
let mut colors: Vec<[f64; 3]> = Vec::new();
let mut converged = false;
// Loop over this pixel until we estimate the error to be small enough.
let mut iterations = 0;
while!converged {
iterations += 1;
for _ in 0..self.spp_per_iteration {
// Create a hitpoint path starting from the camera. The first hitpoint will be the hitpoint of the ray coming from the retina, through the pinhole of the camera - that is, it is a point somewhere on a renderer shape.
let mut hitpoint_path_from_camera: Vec<Hitpoint> = Vec::new();
self.renderer_output_pixels[last_pos].number_of_rays += 1.0;
let index = (pcg.gen_range(0, random_exclusive_max_lights)%(number_of_cameras as u32)) as usize;
let mut ray = self.scene.cameras[index].create_ray(x, y, self.width, self.height, &mut pcg);
let mut color = self.scene.cameras[index].color;
self.create_hitpoint_path(&mut ray, &mut color, &mut hitpoint_path_from_camera, &mut pcg);
let mut total_color = [0.0, 0.0, 0.0];
if hitpoint_path_from_camera.is_empty() {
colors.push(total_color);
continue;
}
let direct_light_sampling = false;
if direct_light_sampling {
// Connect the camera path hitpoints with points on lights.
for hitpoint_in_camera_path in &hitpoint_path_from_camera {
let color = self.connect_and_compute_color(hitpoint_in_camera_path, &mut pcg);
total_color = add(total_color, color);
}
self.store(last_pos, total_color);
colors.push(total_color);
} else {
for hitpoint in hitpoint_path_from_camera.iter().rev() {
total_color = elementwise_mul(total_color, hitpoint.material.color);
total_color = add(total_color, hitpoint.material.emission);
}
self.store(last_pos, total_color);
colors.push(total_color);
}
}
if colors.is_empty() {
break;
}
// Estimate the error. If it's too large, create more rays for this pixel.
let number_of_batches: usize = 20;
let number_of_rays = colors.len();
let batch_size = number_of_rays/number_of_batches;
let mut averages = self.averages(&colors, number_of_batches, batch_size);
self.gamma_correct_averages(&mut averages);
let use_standard_deviation = true;
let error = if use_standard_deviation {
self.standard_deviation(&averages)
} else {
self.maximum_distance(&averages)
};
if error < self.maximum_error || iterations*self.spp_per_iteration >= self.maximum_spp {
converged = true;
} else if iterations%10 == 0 {
println!("Currently: {} iterations at ({}, {}) with an error of {}. Iterating until it is less than {}.", iterations, x, y, error, self.maximum_error);
}
}
}
// @TODO: Fix so that it works even if colors.len()%batch_size!= 0.
fn
|
(&self, colors: &[[f64; 3]], number_of_batches: usize, batch_size: usize) -> Vec<[f64; 3]> {
let mut averages: Vec<[f64; 3]> = vec![[0.0, 0.0, 0.0]; number_of_batches];
for i in 0..colors.len() {
averages[i/batch_size] = add(averages[i/batch_size], colors[i]);
}
for average in &mut averages {
*average = mul(1.0/(batch_size as f64), *average);
}
averages
}
fn gamma_correct_averages(&self, averages: &mut Vec<[f64; 3]>) {
for average in averages {
*average = intensity_to_color(*average);
}
}
fn standard_deviation(&self, averages: &[[f64; 3]]) -> f64 {
let length = averages.len();
let mut r = 0.0;
let mut g = 0.0;
let mut b = 0.0;
let mut r_squared = 0.0;
let mut g_squared = 0.0;
let mut b_squared = 0.0;
for average in averages {
r += average[0];
g += average[1];
b += average[2];
r_squared += average[0]*average[0];
g_squared += average[1]*average[1];
b_squared += average[2]*average[2];
}
let variance = (r_squared + g_squared + b_squared - (r*r+g*g+b*b)/(length as f64))/(length as f64-1.0);
// Due to rounding erros, the computed variance could in rare cases be slightly lower than 0.0.
if variance < 0.0 {
0.0
} else {
variance.sqrt()
}
}
fn maximum_distance(&self, averages: &[[f64; 3]]) -> f64 {
let mut smallest = [f64::MAX, f64::MAX, f64::MAX];
let mut largest = [f64::MIN, f64::MIN, f64::MIN];
for average in averages {
for j in 0..3 {
if average[j] < smallest[j] {
smallest[j] = average[j];
}
if average[j] > largest[j] {
largest[j] = average[j];
}
}
}
let max_r_distance = (largest[0]-smallest[0]).abs();
let max_g_distance = (largest[1]-smallest[1]).abs();
let max_b_distance = (largest[2]-smallest[2]).abs();
max_r_distance+max_g_distance+max_b_distance
}
fn create_hitpoint_path(&mut self, mut ray: &mut Ray, color: &mut [f64; 3], hitpoint_path: &mut Vec<Hitpoint>, pcg: &mut Pcg32) {
let bullet_probability = 0.0;
let survival_boost_factor = 1.0/(1.0-bullet_probability);
loop {
let r = pcg.gen::<f64>();
if r < bullet_probability {
return;
} else {
*color = mul(survival_boost_factor, *color);
}
let hitpoint = self.closest_renderer_shape(&mut ray);
if let Some(mut hitpoint) = hitpoint {
let ingoing_direction = mul(-1.0, ray.direction);
let (refractive_index_1, refractive_index_2) = if hitpoint.hit_from_outside {
(1.0, hitpoint.material.refractive_index)
} else {
(hitpoint.material.refractive_index, 1.0)
};
let normal = if dot(ingoing_direction, hitpoint.normal) > 0.0 {
hitpoint.normal
} else {
mul(-1.0, hitpoint.normal)
};
let (direction, _) = random_from_brdf(ingoing_direction, normal, hitpoint.material, refractive_index_1, refractive_index_2, pcg);
ray.position = hitpoint.position;
ray.direction = direction;
*color = elementwise_mul(*color, hitpoint.material.color);
hitpoint.accumulated_color = *color;
hitpoint_path.push(hitpoint);
} else {
return;
}
}
}
// @TODO: Implement support of triangular lightsources.
fn connect_and_compute_color(&self, hitpoint_in_camera_path: &Hitpoint, mut pcg: &mut Pcg32) -> [f64; 3] {
let number_of_light_spheres = self.scene.light_spheres.len();
let number_of_cameras = self.scene.cameras.len();
let random_exclusive_max_cameras: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_cameras as u32);
let index = (pcg.gen_range(0, random_exclusive_max_cameras)%(number_of_light_spheres as u32)) as usize;
let light_position = self.scene.light_spheres[index].get_position(&mut pcg);
// @TODO: Should it not be.emission rather than.color?
let light_color = self.scene.light_spheres[index].color;
let direction = sub(hitpoint_in_camera_path.position, light_position);
let distance = norm(direction);
let direction_normalised = mul(1.0/distance, direction);
if dot(hitpoint_in_camera_path.normal, direction_normalised) > 0.0 {
return [0.0, 0.0, 0.0];
}
let closest_hitpoint = self.closest_renderer_shape(&mut Ray::new(light_position, direction_normalised));
if let Some(closest_hitpoint) = closest_hitpoint {
// @TODO Check if this is sane.
if distance-closest_hitpoint.distance > 1.0e-9 {
return [0.0, 0.0, 0.0];
}
}
let ingoing_direction = mul(-1.0, hitpoint_in_camera_path.incoming_direction);
let outgoing_direction = mul(-1.0, direction_normalised);
let normal = if dot(ingoing_direction, hitpoint_in_camera_path.normal) > 0.0 {
hitpoint_in_camera_path.normal
} else {
mul(-1.0, hitpoint_in_camera_path.normal)
};
let (refractive_index_1, refractive_index_2) = if hitpoint_in_camera_path.hit_from_outside {
(1.0, hitpoint_in_camera_path.material.refractive_index)
} else {
(hitpoint_in_camera_path.material.refractive_index, 1.0)
};
// @TODO: Get rid of the upper limit of the brdf.
let brdf = min(brdf(ingoing_direction, outgoing_direction, normal, refractive_index_1, refractive_index_2, hitpoint_in_camera_path.material), self.maximum_brdf_value);
mul(brdf/(distance*distance), elementwise_mul(hitpoint_in_camera_path.accumulated_color, light_color))
}
fn store(&mut self, last_pos: usize, color: [f64; 3]) {
self.renderer_output_pixels[last_pos].pixel.color = add(self.renderer_output_pixels[last_pos].pixel.color, color);
self.renderer_output_pixels[last_pos].color = add(self.renderer_output_pixels[last_pos].color, color);
self.renderer_output_pixels[last_pos].number_of_bin_elements += 1;
if self.perform_post_process {
let color = intensity_to_color(color);
self.renderer_output_pixels[last_pos].pixel.bins[(color[0]/255.0*(NUMBER_OF_BINS as f64)) as usize][0] += 1;
self.renderer_output_pixels[last_pos].pixel.bins[(color[1]/255.0*(NUMBER_OF_BINS as f64)) as usize][1] += 1;
self.renderer_output_pixels[last_pos].pixel.bins[(color[2]/255.0*(NUMBER_OF_BINS as f64)) as usize][2] += 1;
}
}
// Find the closest hitpoint.
fn closest_renderer_shape(&self, ray: &mut Ray) -> Option<Hitpoint> {
let mut min_distance = f64::MAX;
let mut closest_renderer_shape_index: Option<usize> = None;
let mut renderer_type = RendererType::Cylinder;
for (i, cylinder) in self.scene.renderer_cylinders.iter().enumerate() {
if!cylinder.active {
continue;
}
let distance = cylinder.distance(&ray);
if distance < min_distance {
min_distance = distance;
closest_renderer_shape_index = Some(i);
}
}
for (i, sphere) in self.scene.renderer_spheres.iter().enumerate() {
if!sphere.active {
continue;
}
let distance = sphere.distance(&ray);
if distance < min_distance {
min_distance = distance;
closest_renderer_shape_index = Some(i);
renderer_type = RendererType::Sphere;
}
}
for (i, triangle) in self.scene.renderer_triangles.iter().enumerate() {
if!triangle.active {
continue;
}
let distance = triangle.distance(&ray);
if distance < min_distance {
min_distance = distance;
closest_renderer_shape_index = Some(i);
renderer_type = RendererType::Triangle;
}
}
if let Some(index) = closest_renderer_shape_index {
let position = add(ray.position, mul(min_distance, ray.direction));
let (normal, material) = match renderer_type {
RendererType::Cylinder => {
(self.scene.renderer_cylinders[index].normal(position), self.scene.renderer_cylinders[index].material())
}
RendererType::Sphere => {
(self.scene.renderer_spheres[index].normal(position), self.scene.renderer_spheres[index].material())
}
RendererType::Triangle => {
(self.scene.renderer_triangles[index].normal(position), self.scene.renderer_triangles[index].material())
}
};
let hit_from_outside = dot(ray.direction, normal) < 0.0;
Some(Hitpoint::new(position, ray.direction, min_distance, normal, material, hit_from_outside, false, [0.0, 0.0, 0.0]))
} else {
None
}
}
}
|
averages
|
identifier_name
|
renderer.rs
|
use std::f64;
use pcg_rand::Pcg32;
use rand::{Rng, SeedableRng};
use crate::hitpoint::Hitpoint;
use crate::math::{add, brdf, dot, elementwise_mul, intensity_to_color, min, mul, norm, random_from_brdf, sub};
use crate::ray::Ray;
use crate::renderershape::RendererShape;
use crate::rendereroutputpixel::RendererOutputPixel;
use crate::rendererscene::RendererScene;
use crate::renderertype::RendererType;
use crate::NUMBER_OF_BINS;
//static MODULO: [usize; 5] = [0, 1, 2, 0, 1];
pub struct Renderer {
width: u32,
height: u32,
spp_per_iteration: u32,
maximum_spp: u32,
maximum_error: f64,
maximum_brdf_value: f64,
perform_post_process: bool,
scene: RendererScene,
renderer_output_pixels: Vec<RendererOutputPixel>
}
impl Renderer {
pub fn new(width: u32, height: u32, spp_per_iteration: u32, maximum_spp: u32, maximum_error: f64, maximum_brdf_value: f64, perform_post_process: bool, scene: RendererScene) -> Self {
let renderer_output_pixels: Vec<RendererOutputPixel> = Vec::new();
Self {
width,
height,
spp_per_iteration,
maximum_spp,
maximum_error,
maximum_brdf_value,
perform_post_process,
scene,
renderer_output_pixels,
}
}
pub fn get_renderer_output_pixels(&self) -> Vec<RendererOutputPixel> {
self.renderer_output_pixels.to_vec()
}
pub fn render(&mut self, y: u32, x: u32) {
if x == self.width/2 {
println!("Rendering row {} of {}.", y, self.height);
}
let number_of_light_spheres = self.scene.light_spheres.len();
let random_exclusive_max_lights: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_light_spheres as u32);
let number_of_cameras = self.scene.cameras.len();
self.renderer_output_pixels.push(RendererOutputPixel::new(y, x));
let last_pos = self.renderer_output_pixels.len()-1;
let mut pcg: Pcg32 = Pcg32::from_entropy();
let mut colors: Vec<[f64; 3]> = Vec::new();
let mut converged = false;
// Loop over this pixel until we estimate the error to be small enough.
let mut iterations = 0;
while!converged {
iterations += 1;
for _ in 0..self.spp_per_iteration {
// Create a hitpoint path starting from the camera. The first hitpoint will be the hitpoint of the ray coming from the retina, through the pinhole of the camera - that is, it is a point somewhere on a renderer shape.
let mut hitpoint_path_from_camera: Vec<Hitpoint> = Vec::new();
self.renderer_output_pixels[last_pos].number_of_rays += 1.0;
let index = (pcg.gen_range(0, random_exclusive_max_lights)%(number_of_cameras as u32)) as usize;
let mut ray = self.scene.cameras[index].create_ray(x, y, self.width, self.height, &mut pcg);
let mut color = self.scene.cameras[index].color;
self.create_hitpoint_path(&mut ray, &mut color, &mut hitpoint_path_from_camera, &mut pcg);
let mut total_color = [0.0, 0.0, 0.0];
if hitpoint_path_from_camera.is_empty() {
colors.push(total_color);
continue;
}
let direct_light_sampling = false;
if direct_light_sampling {
// Connect the camera path hitpoints with points on lights.
for hitpoint_in_camera_path in &hitpoint_path_from_camera {
let color = self.connect_and_compute_color(hitpoint_in_camera_path, &mut pcg);
total_color = add(total_color, color);
}
self.store(last_pos, total_color);
colors.push(total_color);
} else {
for hitpoint in hitpoint_path_from_camera.iter().rev() {
total_color = elementwise_mul(total_color, hitpoint.material.color);
total_color = add(total_color, hitpoint.material.emission);
}
self.store(last_pos, total_color);
colors.push(total_color);
}
}
if colors.is_empty() {
break;
}
// Estimate the error. If it's too large, create more rays for this pixel.
let number_of_batches: usize = 20;
let number_of_rays = colors.len();
let batch_size = number_of_rays/number_of_batches;
let mut averages = self.averages(&colors, number_of_batches, batch_size);
self.gamma_correct_averages(&mut averages);
let use_standard_deviation = true;
let error = if use_standard_deviation {
self.standard_deviation(&averages)
} else {
self.maximum_distance(&averages)
};
if error < self.maximum_error || iterations*self.spp_per_iteration >= self.maximum_spp {
converged = true;
} else if iterations%10 == 0 {
println!("Currently: {} iterations at ({}, {}) with an error of {}. Iterating until it is less than {}.", iterations, x, y, error, self.maximum_error);
}
}
}
// @TODO: Fix so that it works even if colors.len()%batch_size!= 0.
fn averages(&self, colors: &[[f64; 3]], number_of_batches: usize, batch_size: usize) -> Vec<[f64; 3]> {
let mut averages: Vec<[f64; 3]> = vec![[0.0, 0.0, 0.0]; number_of_batches];
for i in 0..colors.len() {
averages[i/batch_size] = add(averages[i/batch_size], colors[i]);
}
for average in &mut averages {
*average = mul(1.0/(batch_size as f64), *average);
}
averages
}
fn gamma_correct_averages(&self, averages: &mut Vec<[f64; 3]>) {
for average in averages {
*average = intensity_to_color(*average);
}
}
fn standard_deviation(&self, averages: &[[f64; 3]]) -> f64 {
let length = averages.len();
let mut r = 0.0;
let mut g = 0.0;
let mut b = 0.0;
let mut r_squared = 0.0;
let mut g_squared = 0.0;
let mut b_squared = 0.0;
for average in averages {
r += average[0];
g += average[1];
b += average[2];
r_squared += average[0]*average[0];
g_squared += average[1]*average[1];
b_squared += average[2]*average[2];
}
let variance = (r_squared + g_squared + b_squared - (r*r+g*g+b*b)/(length as f64))/(length as f64-1.0);
// Due to rounding erros, the computed variance could in rare cases be slightly lower than 0.0.
if variance < 0.0 {
0.0
} else {
variance.sqrt()
}
}
fn maximum_distance(&self, averages: &[[f64; 3]]) -> f64 {
let mut smallest = [f64::MAX, f64::MAX, f64::MAX];
let mut largest = [f64::MIN, f64::MIN, f64::MIN];
for average in averages {
for j in 0..3 {
if average[j] < smallest[j] {
smallest[j] = average[j];
}
if average[j] > largest[j]
|
}
}
let max_r_distance = (largest[0]-smallest[0]).abs();
let max_g_distance = (largest[1]-smallest[1]).abs();
let max_b_distance = (largest[2]-smallest[2]).abs();
max_r_distance+max_g_distance+max_b_distance
}
fn create_hitpoint_path(&mut self, mut ray: &mut Ray, color: &mut [f64; 3], hitpoint_path: &mut Vec<Hitpoint>, pcg: &mut Pcg32) {
let bullet_probability = 0.0;
let survival_boost_factor = 1.0/(1.0-bullet_probability);
loop {
let r = pcg.gen::<f64>();
if r < bullet_probability {
return;
} else {
*color = mul(survival_boost_factor, *color);
}
let hitpoint = self.closest_renderer_shape(&mut ray);
if let Some(mut hitpoint) = hitpoint {
let ingoing_direction = mul(-1.0, ray.direction);
let (refractive_index_1, refractive_index_2) = if hitpoint.hit_from_outside {
(1.0, hitpoint.material.refractive_index)
} else {
(hitpoint.material.refractive_index, 1.0)
};
let normal = if dot(ingoing_direction, hitpoint.normal) > 0.0 {
hitpoint.normal
} else {
mul(-1.0, hitpoint.normal)
};
let (direction, _) = random_from_brdf(ingoing_direction, normal, hitpoint.material, refractive_index_1, refractive_index_2, pcg);
ray.position = hitpoint.position;
ray.direction = direction;
*color = elementwise_mul(*color, hitpoint.material.color);
hitpoint.accumulated_color = *color;
hitpoint_path.push(hitpoint);
} else {
return;
}
}
}
// @TODO: Implement support of triangular lightsources.
fn connect_and_compute_color(&self, hitpoint_in_camera_path: &Hitpoint, mut pcg: &mut Pcg32) -> [f64; 3] {
let number_of_light_spheres = self.scene.light_spheres.len();
let number_of_cameras = self.scene.cameras.len();
let random_exclusive_max_cameras: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_cameras as u32);
let index = (pcg.gen_range(0, random_exclusive_max_cameras)%(number_of_light_spheres as u32)) as usize;
let light_position = self.scene.light_spheres[index].get_position(&mut pcg);
// @TODO: Should it not be.emission rather than.color?
let light_color = self.scene.light_spheres[index].color;
let direction = sub(hitpoint_in_camera_path.position, light_position);
let distance = norm(direction);
let direction_normalised = mul(1.0/distance, direction);
if dot(hitpoint_in_camera_path.normal, direction_normalised) > 0.0 {
return [0.0, 0.0, 0.0];
}
let closest_hitpoint = self.closest_renderer_shape(&mut Ray::new(light_position, direction_normalised));
if let Some(closest_hitpoint) = closest_hitpoint {
// @TODO Check if this is sane.
if distance-closest_hitpoint.distance > 1.0e-9 {
return [0.0, 0.0, 0.0];
}
}
let ingoing_direction = mul(-1.0, hitpoint_in_camera_path.incoming_direction);
let outgoing_direction = mul(-1.0, direction_normalised);
let normal = if dot(ingoing_direction, hitpoint_in_camera_path.normal) > 0.0 {
hitpoint_in_camera_path.normal
} else {
mul(-1.0, hitpoint_in_camera_path.normal)
};
let (refractive_index_1, refractive_index_2) = if hitpoint_in_camera_path.hit_from_outside {
(1.0, hitpoint_in_camera_path.material.refractive_index)
} else {
(hitpoint_in_camera_path.material.refractive_index, 1.0)
};
// @TODO: Get rid of the upper limit of the brdf.
let brdf = min(brdf(ingoing_direction, outgoing_direction, normal, refractive_index_1, refractive_index_2, hitpoint_in_camera_path.material), self.maximum_brdf_value);
mul(brdf/(distance*distance), elementwise_mul(hitpoint_in_camera_path.accumulated_color, light_color))
}
fn store(&mut self, last_pos: usize, color: [f64; 3]) {
self.renderer_output_pixels[last_pos].pixel.color = add(self.renderer_output_pixels[last_pos].pixel.color, color);
self.renderer_output_pixels[last_pos].color = add(self.renderer_output_pixels[last_pos].color, color);
self.renderer_output_pixels[last_pos].number_of_bin_elements += 1;
if self.perform_post_process {
let color = intensity_to_color(color);
self.renderer_output_pixels[last_pos].pixel.bins[(color[0]/255.0*(NUMBER_OF_BINS as f64)) as usize][0] += 1;
self.renderer_output_pixels[last_pos].pixel.bins[(color[1]/255.0*(NUMBER_OF_BINS as f64)) as usize][1] += 1;
self.renderer_output_pixels[last_pos].pixel.bins[(color[2]/255.0*(NUMBER_OF_BINS as f64)) as usize][2] += 1;
}
}
// Find the closest hitpoint.
fn closest_renderer_shape(&self, ray: &mut Ray) -> Option<Hitpoint> {
let mut min_distance = f64::MAX;
let mut closest_renderer_shape_index: Option<usize> = None;
let mut renderer_type = RendererType::Cylinder;
for (i, cylinder) in self.scene.renderer_cylinders.iter().enumerate() {
if!cylinder.active {
continue;
}
let distance = cylinder.distance(&ray);
if distance < min_distance {
min_distance = distance;
closest_renderer_shape_index = Some(i);
}
}
for (i, sphere) in self.scene.renderer_spheres.iter().enumerate() {
if!sphere.active {
continue;
}
let distance = sphere.distance(&ray);
if distance < min_distance {
min_distance = distance;
closest_renderer_shape_index = Some(i);
renderer_type = RendererType::Sphere;
}
}
for (i, triangle) in self.scene.renderer_triangles.iter().enumerate() {
if!triangle.active {
continue;
}
let distance = triangle.distance(&ray);
if distance < min_distance {
min_distance = distance;
closest_renderer_shape_index = Some(i);
renderer_type = RendererType::Triangle;
}
}
if let Some(index) = closest_renderer_shape_index {
let position = add(ray.position, mul(min_distance, ray.direction));
let (normal, material) = match renderer_type {
RendererType::Cylinder => {
(self.scene.renderer_cylinders[index].normal(position), self.scene.renderer_cylinders[index].material())
}
RendererType::Sphere => {
(self.scene.renderer_spheres[index].normal(position), self.scene.renderer_spheres[index].material())
}
RendererType::Triangle => {
(self.scene.renderer_triangles[index].normal(position), self.scene.renderer_triangles[index].material())
}
};
let hit_from_outside = dot(ray.direction, normal) < 0.0;
Some(Hitpoint::new(position, ray.direction, min_distance, normal, material, hit_from_outside, false, [0.0, 0.0, 0.0]))
} else {
None
}
}
}
|
{
largest[j] = average[j];
}
|
conditional_block
|
renderer.rs
|
use std::f64;
use pcg_rand::Pcg32;
use rand::{Rng, SeedableRng};
use crate::hitpoint::Hitpoint;
use crate::math::{add, brdf, dot, elementwise_mul, intensity_to_color, min, mul, norm, random_from_brdf, sub};
use crate::ray::Ray;
use crate::renderershape::RendererShape;
use crate::rendereroutputpixel::RendererOutputPixel;
use crate::rendererscene::RendererScene;
use crate::renderertype::RendererType;
use crate::NUMBER_OF_BINS;
//static MODULO: [usize; 5] = [0, 1, 2, 0, 1];
/// Path-tracing renderer: samples each pixel until its error estimate
/// converges, accumulating per-pixel results in `renderer_output_pixels`.
pub struct Renderer {
/// Image width in pixels.
width: u32,
/// Image height in pixels.
height: u32,
/// Samples per pixel taken between convergence checks.
spp_per_iteration: u32,
/// Hard cap on total samples per pixel.
maximum_spp: u32,
/// Error threshold below which a pixel is considered converged.
maximum_error: f64,
/// Clamp applied to BRDF values during direct light sampling.
maximum_brdf_value: f64,
/// Whether to fill per-channel histogram bins for post-processing.
perform_post_process: bool,
/// Scene geometry, lights and cameras.
scene: RendererScene,
/// One entry per rendered pixel, in render order.
renderer_output_pixels: Vec<RendererOutputPixel>
}
impl Renderer {
/// Builds a renderer for a `width` x `height` image over `scene`.
///
/// Sampling proceeds in rounds of `spp_per_iteration` samples per pixel and
/// stops once the estimated error drops below `maximum_error` or `maximum_spp`
/// samples have been taken. `maximum_brdf_value` caps the BRDF during direct
/// light sampling; `perform_post_process` enables histogram binning.
pub fn new(width: u32, height: u32, spp_per_iteration: u32, maximum_spp: u32, maximum_error: f64, maximum_brdf_value: f64, perform_post_process: bool, scene: RendererScene) -> Self {
    Self {
        width,
        height,
        spp_per_iteration,
        maximum_spp,
        maximum_error,
        maximum_brdf_value,
        perform_post_process,
        scene,
        renderer_output_pixels: Vec::new(),
    }
}
/// Returns a copy of every pixel result produced so far.
pub fn get_renderer_output_pixels(&self) -> Vec<RendererOutputPixel> {
    // `clone()` on a Vec is equivalent to `to_vec()` on its slice.
    self.renderer_output_pixels.clone()
}
/// Renders the pixel at (`x`, `y`): repeatedly traces camera paths and
/// accumulates their colors until the estimated error falls below
/// `self.maximum_error` or the sample budget `self.maximum_spp` is spent.
/// Appends one new entry to `self.renderer_output_pixels` for this pixel.
pub fn render(&mut self, y: u32, x: u32) {
    if x == self.width/2 {
        println!("Rendering row {} of {}.", y, self.height);
    }
    let number_of_cameras = self.scene.cameras.len();
    // Rejection-style bound making `gen_range(0, bound) % number_of_cameras`
    // unbiased. BUG FIX: this bound was previously computed from the number of
    // light spheres but applied modulo the number of cameras, biasing the
    // camera choice whenever the two counts differed.
    let random_exclusive_max_cameras: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_cameras as u32);
    self.renderer_output_pixels.push(RendererOutputPixel::new(y, x));
    let last_pos = self.renderer_output_pixels.len()-1;
    let mut pcg: Pcg32 = Pcg32::from_entropy();
    let mut colors: Vec<[f64; 3]> = Vec::new();
    let mut converged = false;
    // Loop over this pixel until we estimate the error to be small enough.
    let mut iterations = 0;
    while !converged {
        iterations += 1;
        for _ in 0..self.spp_per_iteration {
            // Create a hitpoint path starting from the camera. The first hitpoint
            // is where the ray through the camera pinhole first meets a shape.
            let mut hitpoint_path_from_camera: Vec<Hitpoint> = Vec::new();
            self.renderer_output_pixels[last_pos].number_of_rays += 1.0;
            let index = (pcg.gen_range(0, random_exclusive_max_cameras)%(number_of_cameras as u32)) as usize;
            let mut ray = self.scene.cameras[index].create_ray(x, y, self.width, self.height, &mut pcg);
            let mut color = self.scene.cameras[index].color;
            self.create_hitpoint_path(&mut ray, &mut color, &mut hitpoint_path_from_camera, &mut pcg);
            let mut total_color = [0.0, 0.0, 0.0];
            if hitpoint_path_from_camera.is_empty() {
                colors.push(total_color);
                continue;
            }
            let direct_light_sampling = false;
            if direct_light_sampling {
                // Connect the camera path hitpoints with points on lights.
                for hitpoint_in_camera_path in &hitpoint_path_from_camera {
                    let color = self.connect_and_compute_color(hitpoint_in_camera_path, &mut pcg);
                    total_color = add(total_color, color);
                }
                self.store(last_pos, total_color);
                colors.push(total_color);
            } else {
                // Pure path tracing: walk the path backwards, multiplying in
                // surface colors and adding emission at each bounce.
                for hitpoint in hitpoint_path_from_camera.iter().rev() {
                    total_color = elementwise_mul(total_color, hitpoint.material.color);
                    total_color = add(total_color, hitpoint.material.emission);
                }
                self.store(last_pos, total_color);
                colors.push(total_color);
            }
        }
        if colors.is_empty() {
            break;
        }
        // Estimate the error. If it's too large, create more rays for this pixel.
        let number_of_batches: usize = 20;
        let number_of_rays = colors.len();
        let batch_size = number_of_rays/number_of_batches;
        // BUG FIX: with fewer samples than batches, `batch_size` is 0 and the
        // batch averaging would divide by zero; keep sampling instead (but
        // still honor the sample budget so we cannot loop forever).
        if batch_size == 0 {
            if iterations*self.spp_per_iteration >= self.maximum_spp {
                break;
            }
            continue;
        }
        let mut averages = self.averages(&colors, number_of_batches, batch_size);
        self.gamma_correct_averages(&mut averages);
        let use_standard_deviation = true;
        let error = if use_standard_deviation {
            self.standard_deviation(&averages)
        } else {
            self.maximum_distance(&averages)
        };
        if error < self.maximum_error || iterations*self.spp_per_iteration >= self.maximum_spp {
            converged = true;
        } else if iterations%10 == 0 {
            println!("Currently: {} iterations at ({}, {}) with an error of {}. Iterating until it is less than {}.", iterations, x, y, error, self.maximum_error);
        }
    }
}
/// Splits `colors` into `number_of_batches` consecutive batches of
/// `batch_size` samples and returns the mean color of each batch.
///
/// BUG FIX (was an @TODO): when `colors.len()` is not an exact multiple of
/// `batch_size`, the leftover samples used to index one past the end of
/// `averages` and panic. Leftovers are now folded into the last batch, whose
/// mean is computed with its true element count. For exact multiples the
/// result is unchanged.
fn averages(&self, colors: &[[f64; 3]], number_of_batches: usize, batch_size: usize) -> Vec<[f64; 3]> {
    let mut averages: Vec<[f64; 3]> = vec![[0.0, 0.0, 0.0]; number_of_batches];
    // Degenerate batching would divide (or index) by zero below.
    if number_of_batches == 0 || batch_size == 0 {
        return averages;
    }
    let mut counts: Vec<usize> = vec![0; number_of_batches];
    for (i, color) in colors.iter().enumerate() {
        // Clamp so trailing samples land in the last batch instead of panicking.
        let bin = (i/batch_size).min(number_of_batches - 1);
        averages[bin] = add(averages[bin], *color);
        counts[bin] += 1;
    }
    for (average, &count) in averages.iter_mut().zip(counts.iter()) {
        if count > 0 {
            *average = mul(1.0/(count as f64), *average);
        }
    }
    averages
}
/// Converts every batch average from linear intensity to display color
/// (gamma correction), in place.
fn gamma_correct_averages(&self, averages: &mut Vec<[f64; 3]>) {
    averages.iter_mut().for_each(|average| {
        *average = intensity_to_color(*average);
    });
}
/// Returns the sample standard deviation of the batch averages, pooled
/// over all three color channels.
fn standard_deviation(&self, averages: &[[f64; 3]]) -> f64 {
    let n = averages.len() as f64;
    let mut sum = [0.0_f64; 3];
    let mut sum_sq = [0.0_f64; 3];
    for average in averages {
        for channel in 0..3 {
            sum[channel] += average[channel];
            sum_sq[channel] += average[channel]*average[channel];
        }
    }
    // Pooled sample variance: (sum of squares - (sum)^2/n) / (n - 1).
    let variance = (sum_sq[0] + sum_sq[1] + sum_sq[2]
        - (sum[0]*sum[0] + sum[1]*sum[1] + sum[2]*sum[2])/n)/(n - 1.0);
    // Due to rounding errors, the computed variance can in rare cases dip
    // slightly below 0.0; treat that as zero spread.
    if variance < 0.0 {
        0.0
    } else {
        variance.sqrt()
    }
}
/// Returns the sum over the three channels of the spread (max - min)
/// among the batch averages — an alternative error measure to the
/// standard deviation.
fn maximum_distance(&self, averages: &[[f64; 3]]) -> f64 {
    let mut lo = [f64::MAX; 3];
    let mut hi = [f64::MIN; 3];
    for average in averages {
        // Plain comparisons (not f64::min/max) to keep NaN handling
        // identical to the original element-wise tracking.
        for channel in 0..3 {
            if average[channel] < lo[channel] {
                lo[channel] = average[channel];
            }
            if average[channel] > hi[channel] {
                hi[channel] = average[channel];
            }
        }
    }
    (hi[0]-lo[0]).abs() + (hi[1]-lo[1]).abs() + (hi[2]-lo[2]).abs()
}
/// Traces a path of bounces starting from `ray`, pushing one `Hitpoint`
/// per bounce onto `hitpoint_path` until the ray escapes the scene (or is
/// killed by Russian roulette). `color` accumulates the product of surface
/// colors along the path; each stored hitpoint records the color
/// accumulated up to and including its own surface.
///
/// FIX: dropped the redundant `mut` rebinding of `ray` and the resulting
/// `&mut ray` double-borrow — the parameter is already `&mut Ray`.
fn create_hitpoint_path(&mut self, ray: &mut Ray, color: &mut [f64; 3], hitpoint_path: &mut Vec<Hitpoint>, pcg: &mut Pcg32) {
    // Russian-roulette termination. With probability 0.0 no path is killed,
    // but survivors are still boosted so the estimator stays unbiased if
    // the probability is ever raised.
    let bullet_probability = 0.0;
    let survival_boost_factor = 1.0/(1.0-bullet_probability);
    loop {
        let r = pcg.gen::<f64>();
        if r < bullet_probability {
            return;
        } else {
            *color = mul(survival_boost_factor, *color);
        }
        let hitpoint = self.closest_renderer_shape(ray);
        if let Some(mut hitpoint) = hitpoint {
            let ingoing_direction = mul(-1.0, ray.direction);
            // Refractive indices on either side of the interface depend on
            // whether we hit the surface from outside or inside.
            let (refractive_index_1, refractive_index_2) = if hitpoint.hit_from_outside {
                (1.0, hitpoint.material.refractive_index)
            } else {
                (hitpoint.material.refractive_index, 1.0)
            };
            // Orient the shading normal towards the incoming direction.
            let normal = if dot(ingoing_direction, hitpoint.normal) > 0.0 {
                hitpoint.normal
            } else {
                mul(-1.0, hitpoint.normal)
            };
            // Sample the next bounce direction from the BRDF.
            let (direction, _) = random_from_brdf(ingoing_direction, normal, hitpoint.material, refractive_index_1, refractive_index_2, pcg);
            ray.position = hitpoint.position;
            ray.direction = direction;
            *color = elementwise_mul(*color, hitpoint.material.color);
            hitpoint.accumulated_color = *color;
            hitpoint_path.push(hitpoint);
        } else {
            // Ray left the scene without hitting anything.
            return;
        }
    }
}
// @TODO: Implement support of triangular lightsources.
/// Connects a camera-path hitpoint to a random point on a random light
/// sphere and returns the direct-light contribution, or black if the light
/// lies behind the surface or the connection is occluded.
///
/// BUG FIX: the rejection-sampling bound used to be computed from the
/// number of cameras but applied modulo the number of light spheres,
/// biasing the light choice whenever the two counts differed. The bound is
/// now derived from the light count itself.
fn connect_and_compute_color(&self, hitpoint_in_camera_path: &Hitpoint, pcg: &mut Pcg32) -> [f64; 3] {
    let number_of_light_spheres = self.scene.light_spheres.len();
    // Bound making `gen_range(0, bound) % number_of_light_spheres` unbiased.
    let random_exclusive_max_lights: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_light_spheres as u32);
    let index = (pcg.gen_range(0, random_exclusive_max_lights)%(number_of_light_spheres as u32)) as usize;
    let light_position = self.scene.light_spheres[index].get_position(pcg);
    // @TODO: Should it not be .emission rather than .color?
    let light_color = self.scene.light_spheres[index].color;
    let direction = sub(hitpoint_in_camera_path.position, light_position);
    let distance = norm(direction);
    let direction_normalised = mul(1.0/distance, direction);
    // The light must be on the front side of the surface.
    if dot(hitpoint_in_camera_path.normal, direction_normalised) > 0.0 {
        return [0.0, 0.0, 0.0];
    }
    // Shadow test: anything hit closer than the target point occludes it.
    let closest_hitpoint = self.closest_renderer_shape(&mut Ray::new(light_position, direction_normalised));
    if let Some(closest_hitpoint) = closest_hitpoint {
        // @TODO Check if this is sane.
        if distance-closest_hitpoint.distance > 1.0e-9 {
            return [0.0, 0.0, 0.0];
        }
    }
    let ingoing_direction = mul(-1.0, hitpoint_in_camera_path.incoming_direction);
    let outgoing_direction = mul(-1.0, direction_normalised);
    // Orient the shading normal towards the incoming direction.
    let normal = if dot(ingoing_direction, hitpoint_in_camera_path.normal) > 0.0 {
        hitpoint_in_camera_path.normal
    } else {
        mul(-1.0, hitpoint_in_camera_path.normal)
    };
    let (refractive_index_1, refractive_index_2) = if hitpoint_in_camera_path.hit_from_outside {
        (1.0, hitpoint_in_camera_path.material.refractive_index)
    } else {
        (hitpoint_in_camera_path.material.refractive_index, 1.0)
    };
    // @TODO: Get rid of the upper limit of the brdf.
    let brdf = min(brdf(ingoing_direction, outgoing_direction, normal, refractive_index_1, refractive_index_2, hitpoint_in_camera_path.material), self.maximum_brdf_value);
    // Inverse-square falloff from the light sample point.
    mul(brdf/(distance*distance), elementwise_mul(hitpoint_in_camera_path.accumulated_color, light_color))
}
/// Accumulates `color` into the output pixel at `last_pos` and, when
/// post-processing is enabled, into that pixel's per-channel histogram bins.
fn store(&mut self, last_pos: usize, color: [f64; 3]) {
    let out = &mut self.renderer_output_pixels[last_pos];
    out.pixel.color = add(out.pixel.color, color);
    out.color = add(out.color, color);
    out.number_of_bin_elements += 1;
    if self.perform_post_process {
        let color = intensity_to_color(color);
        for channel in 0..3 {
            // BUG FIX: a channel value of exactly 255.0 used to yield bin
            // index NUMBER_OF_BINS, one past the end of `bins`; clamp it.
            let bin = ((color[channel]/255.0*(NUMBER_OF_BINS as f64)) as usize).min(NUMBER_OF_BINS as usize - 1);
            out.pixel.bins[bin][channel] += 1;
        }
    }
}
// Find the closest hitpoint.
fn closest_renderer_shape(&self, ray: &mut Ray) -> Option<Hitpoint> {
let mut min_distance = f64::MAX;
let mut closest_renderer_shape_index: Option<usize> = None;
let mut renderer_type = RendererType::Cylinder;
for (i, cylinder) in self.scene.renderer_cylinders.iter().enumerate() {
if!cylinder.active {
continue;
|
}
}
for (i, sphere) in self.scene.renderer_spheres.iter().enumerate() {
if!sphere.active {
continue;
}
let distance = sphere.distance(&ray);
if distance < min_distance {
min_distance = distance;
closest_renderer_shape_index = Some(i);
renderer_type = RendererType::Sphere;
}
}
for (i, triangle) in self.scene.renderer_triangles.iter().enumerate() {
if!triangle.active {
continue;
}
let distance = triangle.distance(&ray);
if distance < min_distance {
min_distance = distance;
closest_renderer_shape_index = Some(i);
renderer_type = RendererType::Triangle;
}
}
if let Some(index) = closest_renderer_shape_index {
let position = add(ray.position, mul(min_distance, ray.direction));
let (normal, material) = match renderer_type {
RendererType::Cylinder => {
(self.scene.renderer_cylinders[index].normal(position), self.scene.renderer_cylinders[index].material())
}
RendererType::Sphere => {
(self.scene.renderer_spheres[index].normal(position), self.scene.renderer_spheres[index].material())
}
RendererType::Triangle => {
(self.scene.renderer_triangles[index].normal(position), self.scene.renderer_triangles[index].material())
}
};
let hit_from_outside = dot(ray.direction, normal) < 0.0;
Some(Hitpoint::new(position, ray.direction, min_distance, normal, material, hit_from_outside, false, [0.0, 0.0, 0.0]))
} else {
None
}
}
}
|
}
let distance = cylinder.distance(&ray);
if distance < min_distance {
min_distance = distance;
closest_renderer_shape_index = Some(i);
|
random_line_split
|
renderer.rs
|
use std::f64;
use pcg_rand::Pcg32;
use rand::{Rng, SeedableRng};
use crate::hitpoint::Hitpoint;
use crate::math::{add, brdf, dot, elementwise_mul, intensity_to_color, min, mul, norm, random_from_brdf, sub};
use crate::ray::Ray;
use crate::renderershape::RendererShape;
use crate::rendereroutputpixel::RendererOutputPixel;
use crate::rendererscene::RendererScene;
use crate::renderertype::RendererType;
use crate::NUMBER_OF_BINS;
//static MODULO: [usize; 5] = [0, 1, 2, 0, 1];
pub struct Renderer {
width: u32,
height: u32,
spp_per_iteration: u32,
maximum_spp: u32,
maximum_error: f64,
maximum_brdf_value: f64,
perform_post_process: bool,
scene: RendererScene,
renderer_output_pixels: Vec<RendererOutputPixel>
}
impl Renderer {
pub fn new(width: u32, height: u32, spp_per_iteration: u32, maximum_spp: u32, maximum_error: f64, maximum_brdf_value: f64, perform_post_process: bool, scene: RendererScene) -> Self {
let renderer_output_pixels: Vec<RendererOutputPixel> = Vec::new();
Self {
width,
height,
spp_per_iteration,
maximum_spp,
maximum_error,
maximum_brdf_value,
perform_post_process,
scene,
renderer_output_pixels,
}
}
pub fn get_renderer_output_pixels(&self) -> Vec<RendererOutputPixel> {
self.renderer_output_pixels.to_vec()
}
pub fn render(&mut self, y: u32, x: u32) {
if x == self.width/2 {
println!("Rendering row {} of {}.", y, self.height);
}
let number_of_light_spheres = self.scene.light_spheres.len();
let random_exclusive_max_lights: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_light_spheres as u32);
let number_of_cameras = self.scene.cameras.len();
self.renderer_output_pixels.push(RendererOutputPixel::new(y, x));
let last_pos = self.renderer_output_pixels.len()-1;
let mut pcg: Pcg32 = Pcg32::from_entropy();
let mut colors: Vec<[f64; 3]> = Vec::new();
let mut converged = false;
// Loop over this pixel until we estimate the error to be small enough.
let mut iterations = 0;
while!converged {
iterations += 1;
for _ in 0..self.spp_per_iteration {
// Create a hitpoint path starting from the camera. The first hitpoint will be the hitpoint of the ray coming from the retina, through the pinhole of the camera - that is, it is a point somewhere on a renderer shape.
let mut hitpoint_path_from_camera: Vec<Hitpoint> = Vec::new();
self.renderer_output_pixels[last_pos].number_of_rays += 1.0;
let index = (pcg.gen_range(0, random_exclusive_max_lights)%(number_of_cameras as u32)) as usize;
let mut ray = self.scene.cameras[index].create_ray(x, y, self.width, self.height, &mut pcg);
let mut color = self.scene.cameras[index].color;
self.create_hitpoint_path(&mut ray, &mut color, &mut hitpoint_path_from_camera, &mut pcg);
let mut total_color = [0.0, 0.0, 0.0];
if hitpoint_path_from_camera.is_empty() {
colors.push(total_color);
continue;
}
let direct_light_sampling = false;
if direct_light_sampling {
// Connect the camera path hitpoints with points on lights.
for hitpoint_in_camera_path in &hitpoint_path_from_camera {
let color = self.connect_and_compute_color(hitpoint_in_camera_path, &mut pcg);
total_color = add(total_color, color);
}
self.store(last_pos, total_color);
colors.push(total_color);
} else {
for hitpoint in hitpoint_path_from_camera.iter().rev() {
total_color = elementwise_mul(total_color, hitpoint.material.color);
total_color = add(total_color, hitpoint.material.emission);
}
self.store(last_pos, total_color);
colors.push(total_color);
}
}
if colors.is_empty() {
break;
}
// Estimate the error. If it's too large, create more rays for this pixel.
let number_of_batches: usize = 20;
let number_of_rays = colors.len();
let batch_size = number_of_rays/number_of_batches;
let mut averages = self.averages(&colors, number_of_batches, batch_size);
self.gamma_correct_averages(&mut averages);
let use_standard_deviation = true;
let error = if use_standard_deviation {
self.standard_deviation(&averages)
} else {
self.maximum_distance(&averages)
};
if error < self.maximum_error || iterations*self.spp_per_iteration >= self.maximum_spp {
converged = true;
} else if iterations%10 == 0 {
println!("Currently: {} iterations at ({}, {}) with an error of {}. Iterating until it is less than {}.", iterations, x, y, error, self.maximum_error);
}
}
}
// @TODO: Fix so that it works even if colors.len()%batch_size!= 0.
fn averages(&self, colors: &[[f64; 3]], number_of_batches: usize, batch_size: usize) -> Vec<[f64; 3]> {
let mut averages: Vec<[f64; 3]> = vec![[0.0, 0.0, 0.0]; number_of_batches];
for i in 0..colors.len() {
averages[i/batch_size] = add(averages[i/batch_size], colors[i]);
}
for average in &mut averages {
*average = mul(1.0/(batch_size as f64), *average);
}
averages
}
fn gamma_correct_averages(&self, averages: &mut Vec<[f64; 3]>) {
for average in averages {
*average = intensity_to_color(*average);
}
}
fn standard_deviation(&self, averages: &[[f64; 3]]) -> f64 {
let length = averages.len();
let mut r = 0.0;
let mut g = 0.0;
let mut b = 0.0;
let mut r_squared = 0.0;
let mut g_squared = 0.0;
let mut b_squared = 0.0;
for average in averages {
r += average[0];
g += average[1];
b += average[2];
r_squared += average[0]*average[0];
g_squared += average[1]*average[1];
b_squared += average[2]*average[2];
}
let variance = (r_squared + g_squared + b_squared - (r*r+g*g+b*b)/(length as f64))/(length as f64-1.0);
// Due to rounding erros, the computed variance could in rare cases be slightly lower than 0.0.
if variance < 0.0 {
0.0
} else {
variance.sqrt()
}
}
fn maximum_distance(&self, averages: &[[f64; 3]]) -> f64 {
let mut smallest = [f64::MAX, f64::MAX, f64::MAX];
let mut largest = [f64::MIN, f64::MIN, f64::MIN];
for average in averages {
for j in 0..3 {
if average[j] < smallest[j] {
smallest[j] = average[j];
}
if average[j] > largest[j] {
largest[j] = average[j];
}
}
}
let max_r_distance = (largest[0]-smallest[0]).abs();
let max_g_distance = (largest[1]-smallest[1]).abs();
let max_b_distance = (largest[2]-smallest[2]).abs();
max_r_distance+max_g_distance+max_b_distance
}
fn create_hitpoint_path(&mut self, mut ray: &mut Ray, color: &mut [f64; 3], hitpoint_path: &mut Vec<Hitpoint>, pcg: &mut Pcg32) {
let bullet_probability = 0.0;
let survival_boost_factor = 1.0/(1.0-bullet_probability);
loop {
let r = pcg.gen::<f64>();
if r < bullet_probability {
return;
} else {
*color = mul(survival_boost_factor, *color);
}
let hitpoint = self.closest_renderer_shape(&mut ray);
if let Some(mut hitpoint) = hitpoint {
let ingoing_direction = mul(-1.0, ray.direction);
let (refractive_index_1, refractive_index_2) = if hitpoint.hit_from_outside {
(1.0, hitpoint.material.refractive_index)
} else {
(hitpoint.material.refractive_index, 1.0)
};
let normal = if dot(ingoing_direction, hitpoint.normal) > 0.0 {
hitpoint.normal
} else {
mul(-1.0, hitpoint.normal)
};
let (direction, _) = random_from_brdf(ingoing_direction, normal, hitpoint.material, refractive_index_1, refractive_index_2, pcg);
ray.position = hitpoint.position;
ray.direction = direction;
*color = elementwise_mul(*color, hitpoint.material.color);
hitpoint.accumulated_color = *color;
hitpoint_path.push(hitpoint);
} else {
return;
}
}
}
// @TODO: Implement support of triangular lightsources.
fn connect_and_compute_color(&self, hitpoint_in_camera_path: &Hitpoint, mut pcg: &mut Pcg32) -> [f64; 3] {
let number_of_light_spheres = self.scene.light_spheres.len();
let number_of_cameras = self.scene.cameras.len();
let random_exclusive_max_cameras: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_cameras as u32);
let index = (pcg.gen_range(0, random_exclusive_max_cameras)%(number_of_light_spheres as u32)) as usize;
let light_position = self.scene.light_spheres[index].get_position(&mut pcg);
// @TODO: Should it not be.emission rather than.color?
let light_color = self.scene.light_spheres[index].color;
let direction = sub(hitpoint_in_camera_path.position, light_position);
let distance = norm(direction);
let direction_normalised = mul(1.0/distance, direction);
if dot(hitpoint_in_camera_path.normal, direction_normalised) > 0.0 {
return [0.0, 0.0, 0.0];
}
let closest_hitpoint = self.closest_renderer_shape(&mut Ray::new(light_position, direction_normalised));
if let Some(closest_hitpoint) = closest_hitpoint {
// @TODO Check if this is sane.
if distance-closest_hitpoint.distance > 1.0e-9 {
return [0.0, 0.0, 0.0];
}
}
let ingoing_direction = mul(-1.0, hitpoint_in_camera_path.incoming_direction);
let outgoing_direction = mul(-1.0, direction_normalised);
let normal = if dot(ingoing_direction, hitpoint_in_camera_path.normal) > 0.0 {
hitpoint_in_camera_path.normal
} else {
mul(-1.0, hitpoint_in_camera_path.normal)
};
let (refractive_index_1, refractive_index_2) = if hitpoint_in_camera_path.hit_from_outside {
(1.0, hitpoint_in_camera_path.material.refractive_index)
} else {
(hitpoint_in_camera_path.material.refractive_index, 1.0)
};
// @TODO: Get rid of the upper limit of the brdf.
let brdf = min(brdf(ingoing_direction, outgoing_direction, normal, refractive_index_1, refractive_index_2, hitpoint_in_camera_path.material), self.maximum_brdf_value);
mul(brdf/(distance*distance), elementwise_mul(hitpoint_in_camera_path.accumulated_color, light_color))
}
fn store(&mut self, last_pos: usize, color: [f64; 3])
|
// Find the closest hitpoint.
/// Intersects `ray` against every active cylinder, sphere and triangle in the
/// scene and returns the nearest hit, or `None` if nothing was hit.
/// NOTE(review): assumes a shape's `distance()` reports a miss with a value
/// >= f64::MAX (so it never beats `min_distance`) — TODO confirm contract.
fn closest_renderer_shape(&self, ray: &mut Ray) -> Option<Hitpoint> {
let mut min_distance = f64::MAX;
let mut closest_renderer_shape_index: Option<usize> = None;
// Which shape list the current best hit came from; the initial value only
// matters if a cylinder turns out to be the winner.
let mut renderer_type = RendererType::Cylinder;
for (i, cylinder) in self.scene.renderer_cylinders.iter().enumerate() {
if!cylinder.active {
continue;
}
let distance = cylinder.distance(&ray);
if distance < min_distance {
min_distance = distance;
closest_renderer_shape_index = Some(i);
}
}
for (i, sphere) in self.scene.renderer_spheres.iter().enumerate() {
if!sphere.active {
continue;
}
let distance = sphere.distance(&ray);
if distance < min_distance {
min_distance = distance;
closest_renderer_shape_index = Some(i);
renderer_type = RendererType::Sphere;
}
}
for (i, triangle) in self.scene.renderer_triangles.iter().enumerate() {
if!triangle.active {
continue;
}
let distance = triangle.distance(&ray);
if distance < min_distance {
min_distance = distance;
closest_renderer_shape_index = Some(i);
renderer_type = RendererType::Triangle;
}
}
if let Some(index) = closest_renderer_shape_index {
// Reconstruct the hit position from the parametric distance along the ray.
let position = add(ray.position, mul(min_distance, ray.direction));
let (normal, material) = match renderer_type {
RendererType::Cylinder => {
(self.scene.renderer_cylinders[index].normal(position), self.scene.renderer_cylinders[index].material())
}
RendererType::Sphere => {
(self.scene.renderer_spheres[index].normal(position), self.scene.renderer_spheres[index].material())
}
RendererType::Triangle => {
(self.scene.renderer_triangles[index].normal(position), self.scene.renderer_triangles[index].material())
}
};
// The ray hits the front face when it travels against the surface normal.
let hit_from_outside = dot(ray.direction, normal) < 0.0;
Some(Hitpoint::new(position, ray.direction, min_distance, normal, material, hit_from_outside, false, [0.0, 0.0, 0.0]))
} else {
None
}
}
}
|
{
self.renderer_output_pixels[last_pos].pixel.color = add(self.renderer_output_pixels[last_pos].pixel.color, color);
self.renderer_output_pixels[last_pos].color = add(self.renderer_output_pixels[last_pos].color, color);
self.renderer_output_pixels[last_pos].number_of_bin_elements += 1;
if self.perform_post_process {
let color = intensity_to_color(color);
self.renderer_output_pixels[last_pos].pixel.bins[(color[0]/255.0*(NUMBER_OF_BINS as f64)) as usize][0] += 1;
self.renderer_output_pixels[last_pos].pixel.bins[(color[1]/255.0*(NUMBER_OF_BINS as f64)) as usize][1] += 1;
self.renderer_output_pixels[last_pos].pixel.bins[(color[2]/255.0*(NUMBER_OF_BINS as f64)) as usize][2] += 1;
}
}
|
identifier_body
|
de.rs
|
use serde::{
de::{DeserializeSeed, MapAccess, SeqAccess, Visitor},
forward_to_deserialize_any,
};
use serde::Deserialize;
use std::{cell::Cell, fmt};
/// Errors produced while decoding bencoded torrent data.
#[derive(Debug, PartialEq)]
pub enum DeserializeError {
// The input ended in the middle of a value.
UnexpectedEOF,
// An unexpected byte was encountered (stored for diagnostics).
WrongCharacter(u8),
// End-of-container marker; no constructor is visible in this file chunk —
// TODO confirm it is still used.
End,
// No `info` dictionary was found, so no info-hash can be computed.
InfoHashMissing,
// Dictionary nesting exceeded the recursion limit (see `deserialize_any`).
TooDeep,
// Multi-file torrent with an empty file list.
NoFile,
// Single-file torrent with a zero-length file.
EmptyFile,
// `pieces` is not a multiple of 20 bytes (the SHA-1 digest size).
UnalignedPieces,
// Free-form message produced via `serde::de::Error::custom`.
Message(String),
}
// Module-wide alias: every fallible operation here uses this error type.
type Result<T> = std::result::Result<T, DeserializeError>;
impl serde::de::Error for DeserializeError {
    /// Wraps any serde-generated error message in the `Message` variant.
    fn custom<T>(msg: T) -> Self
    where
        T: fmt::Display,
    {
        Self::Message(msg.to_string())
    }
}
impl fmt::Display for DeserializeError {
    /// Formats the error using its `Debug` representation.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        // `write!` streams straight into the formatter; the previous
        // `format!(...)` + `write_str` allocated an intermediate String.
        write!(formatter, "{:?}", self)
    }
}
impl std::error::Error for DeserializeError {
fn
|
(&self) -> &str {
"aa"
//self.msg.as_str()
}
}
/// Deserializes a value of type `T` from bencoded bytes.
pub fn from_bytes<'de, T>(s: &'de [u8]) -> Result<T>
where
    T: Deserialize<'de>,
{
    let mut deserializer = Deserializer::new(s);
    T::deserialize(&mut deserializer)
}
/// Deserializes `T` and additionally returns the SHA-1 digest of the raw
/// bencoded `info` dictionary (the torrent's "info-hash").
///
/// Returns `InfoHashMissing` if no `info` dictionary was seen during
/// deserialization.
pub fn from_bytes_with_hash<'de, T>(s: &'de [u8]) -> Result<(T, Vec<u8>)>
where
T: Deserialize<'de>,
{
let mut de: Deserializer = Deserializer::new(s);
let res = T::deserialize(&mut de)?;
// `start_info`/`end_info` are captured by `read_string` and the MapAccess
// implementation while the value of the "info" key is being parsed.
let info_hash = if!de.start_info.is_null() && de.end_info > de.start_info {
let len = de.end_info as usize - de.start_info as usize;
// SAFETY (review): both pointers appear to point into `s`, which is still
// alive here, and `end_info > start_info` bounds `len` — TODO confirm
// `end_info` always points into the same buffer as `start_info`.
let slice = unsafe { std::slice::from_raw_parts(de.start_info, len) };
sha1::Sha1::from(&slice[..]).digest().bytes().to_vec()
} else {
//eprintln!("START={:?} END={:?}", de.start_info, de.end_info);
return Err(DeserializeError::InfoHashMissing);
};
Ok((res, info_hash))
}
use crate::metadata::{InfoFile, MetaTorrent, Torrent};
/// Parses torrent metadata from bencoded bytes and validates it.
///
/// Rejects torrents whose `pieces` string is not a whole number of SHA-1
/// digests, multi-file torrents with no files, and single-file torrents of
/// zero length.
pub fn read_meta(s: &[u8]) -> Result<Torrent> {
    let (meta, info_hash): (MetaTorrent, Vec<u8>) = from_bytes_with_hash(s)?;

    // Each piece hash is exactly one 20-byte SHA-1 digest.
    if meta.info.pieces.len() % 20 != 0 {
        return Err(DeserializeError::UnalignedPieces);
    }

    match &meta.info.files {
        InfoFile::Multiple { files, .. } if files.is_empty() => {
            return Err(DeserializeError::NoFile)
        }
        InfoFile::Single { length, .. } if *length == 0 => {
            return Err(DeserializeError::EmptyFile)
        }
        _ => {}
    }

    Ok(Torrent {
        meta,
        info_hash: info_hash.into(),
    })
}
// 4b3ea6a5b1e62537dceb67230248ff092a723e4d
// 4b3ea6a5b1e62537dceb67230248ff092a723e4d
#[doc(hidden)]
/// Bencode deserializer over a borrowed byte slice.
pub struct Deserializer<'de> {
// Remaining unread input; advanced as bytes are consumed.
input: &'de [u8],
// Raw pointers into the original buffer delimiting the bencoded bytes of
// the `info` dictionary, used later to compute the info-hash.
start_info: *const u8,
end_info: *const u8,
// Nesting depth relative to the `info` dictionary (0 = not inside it).
info_depth: i64,
// Fix v2_deep_recursion.torrent
// Dictionary nesting counter, capped in `deserialize_any` to avoid stack
// overflow on maliciously deep inputs.
depth: Cell<u16>,
}
#[doc(hidden)] // was `#[doc(hidbn)]` — invalid doc attribute (typo)
impl<'de> Deserializer<'de> {
    /// Creates a deserializer reading from `input`.
    fn new(input: &'de [u8]) -> Self {
        Deserializer {
            input,
            start_info: std::ptr::null(),
            end_info: std::ptr::null(),
            info_depth: 0,
            depth: Cell::new(0),
        }
    }

    /// Returns the next byte without consuming it.
    fn peek(&self) -> Option<u8> {
        self.input.first().copied()
    }

    /// Consumes and returns the next byte, or `UnexpectedEOF`.
    fn next(&mut self) -> Result<u8> {
        if let Some(c) = self.peek() {
            let _ = self.consume();
            return Ok(c);
        }
        Err(DeserializeError::UnexpectedEOF)
    }

    /// Advances the input by one byte.
    fn consume(&mut self) -> Result<()> {
        // No extra `&`: `get(1..)` already yields a `&'de [u8]` subslice.
        self.input = self.input.get(1..).ok_or(DeserializeError::UnexpectedEOF)?;
        Ok(())
    }

    /// Advances the input by `n` bytes.
    fn skip(&mut self, n: i64) -> Result<()> {
        self.input = self
            .input
            .get(n as usize..)
            .ok_or(DeserializeError::UnexpectedEOF)?;
        Ok(())
    }

    /// Reads an unsigned decimal integer terminated by `stop`.
    ///
    /// Uses checked arithmetic: the input is untrusted, and the previous
    /// `n * 10 + d` would panic in debug builds (and silently wrap in
    /// release) on absurdly long digit runs.
    /// NOTE(review): an empty digit run (e.g. `ie`) still parses as 0;
    /// strict bencode would reject it — kept for compatibility.
    fn read_integer(&mut self, stop: u8) -> Result<i64> {
        let mut n: i64 = 0;
        loop {
            match self.next()? {
                c @ b'0'..=b'9' => {
                    n = n
                        .checked_mul(10)
                        .and_then(|v| v.checked_add(i64::from(c - b'0')))
                        .ok_or_else(|| {
                            DeserializeError::Message("integer overflow".to_string())
                        })?;
                }
                c if c == stop => break,
                c => return Err(DeserializeError::WrongCharacter(c)),
            }
        }
        Ok(n)
    }

    /// Reads a bencode integer `i<digits>e`, with optional leading minus.
    fn read_number(&mut self) -> Result<i64> {
        self.consume()?; // 'i'
        let negative = match self.peek() {
            Some(b'-') => {
                self.consume()?;
                true
            }
            _ => false,
        };
        let n = self.read_integer(b'e')?;
        Ok(if negative { -n } else { n })
    }

    /// Reads a length-prefixed byte string `<len>:<bytes>`.
    fn read_string(&mut self) -> Result<&'de [u8]> {
        let len = self.read_integer(b':')?;
        let s = self
            .input
            .get(..len as usize)
            .ok_or(DeserializeError::UnexpectedEOF)?;
        self.skip(len)?;
        if s == b"info" {
            // Remember where the `info` key's value starts so the caller can
            // hash the raw bytes (see `from_bytes_with_hash`).
            self.start_info = self.input.as_ptr();
            self.info_depth = 1;
        }
        Ok(s)
    }
}
#[doc(hidden)]
impl<'a, 'de> serde::de::Deserializer<'de> for &'a mut Deserializer<'de> {
type Error = DeserializeError;
/// Dispatches on the next byte: `i` = integer, `l` = list, `d` = dict,
/// leading digit = length-prefixed byte string.
fn deserialize_any<V>(self, visitor: V) -> Result<V::Value>
where
V: Visitor<'de>,
{
// println!("NEXT: {:?}", self.peek());
match self.peek().ok_or(DeserializeError::UnexpectedEOF)? {
b'i' => {
// println!("FOUND NUMBER", );
visitor.visit_i64(self.read_number()?)
}
b'l' => {
self.consume()?;
// println!("FOUND LIST {:?}", &self.input[..10]);
visitor.visit_seq(BencAccess::new(self))
}
b'd' => {
// Guard against maliciously deep nesting (stack exhaustion).
// NOTE(review): `depth` is never decremented in this file, so the
// limit applies to the total number of dicts seen, not the nesting
// depth — TODO confirm this is intentional.
let depth = self.depth.get();
if depth > 100 {
return Err(DeserializeError::TooDeep);
}
self.depth.set(depth + 1);
// println!("FOUND DICT {}", self.depth.get());
self.consume()?;
visitor.visit_map(BencAccess::new(self))
}
_n @ b'0'..=b'9' => {
// println!("FOUND STRING", );
visitor.visit_borrowed_bytes(self.read_string()?)
}
c => Err(DeserializeError::WrongCharacter(c)),
}
}
/// Bencode has no explicit null: every present value deserializes as `Some`.
fn deserialize_option<V>(self, visitor: V) -> Result<V::Value>
where
V: Visitor<'de>,
{
visitor.visit_some(self)
}
// Bencode is self-describing, so every other type routes through
// `deserialize_any`.
forward_to_deserialize_any! {
bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str string
unit unit_struct seq tuple tuple_struct map struct identifier
newtype_struct ignored_any enum bytes byte_buf
}
}
/// Access adapter used for both bencode lists (`SeqAccess`) and
/// dictionaries (`MapAccess`).
struct BencAccess<'a, 'de> {
    de: &'a mut Deserializer<'de>,
}

impl<'a, 'de> BencAccess<'a, 'de> {
    /// Wraps the deserializer. While parsing is inside the `info`
    /// dictionary, bumps `info_depth` so the dictionary's end can be
    /// detected when the depth drops back to 1.
    fn new(de: &'a mut Deserializer<'de>) -> Self {
        let inside_info = de.info_depth >= 1;
        if inside_info {
            de.info_depth += 1;
        }
        BencAccess { de }
    }
}
impl<'a, 'de> MapAccess<'de> for BencAccess<'a, 'de> {
type Error = DeserializeError;
/// Reads the next dictionary key, or returns `None` at the `e` terminator.
fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>>
where
K: DeserializeSeed<'de>,
{
if self.de.peek() == Some(b'e') {
let _ = self.de.consume();
// NOTE(review): decremented unconditionally, so `info_depth` can go
// negative for dicts outside the `info` dictionary — presumably
// harmless because `end_info` is only captured when it lands on 1.
self.de.info_depth -= 1;
if self.de.info_depth == 1 {
//println!("FOUND END!");
// Just past the `info` dict's closing `e`: this marks the end of
// the byte range hashed by `from_bytes_with_hash`.
self.de.end_info = self.de.input.as_ptr();
}
let _s = self.de.input;
//println!("DEPTH[END_DICT]={:?} {:?}", self.de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec()));
return Ok(None);
}
seed.deserialize(&mut *self.de).map(Some)
}
/// A value always follows its key directly; no lookahead needed.
fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value>
where
V: DeserializeSeed<'de>,
{
seed.deserialize(&mut *self.de)
}
}
impl<'a, 'de> SeqAccess<'de> for BencAccess<'a, 'de> {
    type Error = DeserializeError;

    /// Yields the next list element, or `None` at the list terminator `e`.
    fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>>
    where
        T: DeserializeSeed<'de>,
    {
        match self.de.peek() {
            Some(b'e') => {
                let _ = self.de.consume();
                // Leaving a nested structure inside the `info` dictionary;
                // never let the counter go negative.
                if self.de.info_depth >= 1 {
                    self.de.info_depth -= 1;
                }
                Ok(None)
            }
            // On EOF this falls through and `deserialize` reports
            // `UnexpectedEOF`, matching the original behavior.
            _ => seed.deserialize(&mut *self.de).map(Some),
        }
    }
}
#[allow(non_snake_case)]
#[cfg(test)]
mod tests {
use super::{from_bytes, DeserializeError, Result};
use serde::Deserialize;
// Decodes a flat dictionary with one integer and three string values.
#[test]
fn test_dict() {
#[derive(Deserialize, PartialEq, Debug)]
struct Dict<'b> {
a: i64,
b: &'b str,
c: &'b str,
X: &'b str,
}
let bc: Dict = from_bytes(b"d1:ai12453e1:b3:aaa1:c3:bbb1:X10:0123456789e").unwrap();
assert_eq!(
bc,
Dict {
a: 12453,
b: "aaa",
c: "bbb",
X: "0123456789",
}
);
}
// A key with no following value must fail; 101 is the byte value of b'e',
// which is found where a value was expected.
#[test]
fn test_key_no_value() {
#[derive(Deserialize, PartialEq, Debug)]
struct Dict<'b> {
a: i64,
b: &'b str,
}
let res: Result<Dict> = from_bytes(b"d1:ai1e1:be");
println!("{:?}", res);
assert_eq!(res, Err(DeserializeError::WrongCharacter(101)));
}
// Dictionary keys must be byte strings; an integer key is rejected.
#[test]
fn test_key_not_string() {
#[derive(Deserialize, Debug)]
struct Dict<'b> {
a: i64,
b: &'b str,
}
let res: Result<Dict> = from_bytes(b"di5e1:ae");
println!("{:?}", res);
assert!(res.is_err());
}
// TODO: Add more tests from
// https://github.com/arvidn/libtorrent/blob/RC_1_2/test/test_bdecode.cpp
}
|
description
|
identifier_name
|
de.rs
|
use serde::{
de::{DeserializeSeed, MapAccess, SeqAccess, Visitor},
forward_to_deserialize_any,
};
use serde::Deserialize;
use std::{cell::Cell, fmt};
#[derive(Debug, PartialEq)]
pub enum DeserializeError {
UnexpectedEOF,
WrongCharacter(u8),
End,
InfoHashMissing,
TooDeep,
NoFile,
EmptyFile,
UnalignedPieces,
Message(String),
}
type Result<T> = std::result::Result<T, DeserializeError>;
impl serde::de::Error for DeserializeError {
fn custom<T: fmt::Display>(msg: T) -> Self {
DeserializeError::Message(msg.to_string())
}
}
impl fmt::Display for DeserializeError {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str(&format!("{:?}", self))
//formatter.write_str(std::error::Error::description(self))
}
}
impl std::error::Error for DeserializeError {
    // `description()` has been deprecated since Rust 1.42 in favor of
    // `Display`; the default implementation is sufficient, and the old
    // override only returned the meaningless placeholder "aa".
}
pub fn from_bytes<'de, T>(s: &'de [u8]) -> Result<T>
where
T: Deserialize<'de>,
{
let mut de: Deserializer = Deserializer::new(s);
T::deserialize(&mut de)
}
pub fn from_bytes_with_hash<'de, T>(s: &'de [u8]) -> Result<(T, Vec<u8>)>
where
T: Deserialize<'de>,
{
let mut de: Deserializer = Deserializer::new(s);
let res = T::deserialize(&mut de)?;
let info_hash = if!de.start_info.is_null() && de.end_info > de.start_info {
let len = de.end_info as usize - de.start_info as usize;
let slice = unsafe { std::slice::from_raw_parts(de.start_info, len) };
sha1::Sha1::from(&slice[..]).digest().bytes().to_vec()
} else {
//eprintln!("START={:?} END={:?}", de.start_info, de.end_info);
return Err(DeserializeError::InfoHashMissing);
};
Ok((res, info_hash))
}
use crate::metadata::{InfoFile, MetaTorrent, Torrent};
pub fn read_meta(s: &[u8]) -> Result<Torrent> {
let (meta, info_hash): (MetaTorrent, Vec<u8>) = from_bytes_with_hash(s)?;
if meta.info.pieces.len() % 20!= 0 {
return Err(DeserializeError::UnalignedPieces);
}
match &meta.info.files {
InfoFile::Multiple { files,.. } => {
if files.is_empty() {
return Err(DeserializeError::NoFile);
}
}
InfoFile::Single { length,.. } => {
if *length == 0 {
return Err(DeserializeError::EmptyFile);
}
}
}
Ok(Torrent {
meta,
info_hash: info_hash.into(),
})
}
// 4b3ea6a5b1e62537dceb67230248ff092a723e4d
// 4b3ea6a5b1e62537dceb67230248ff092a723e4d
#[doc(hidden)]
pub struct Deserializer<'de> {
input: &'de [u8],
start_info: *const u8,
end_info: *const u8,
info_depth: i64,
// Fix v2_deep_recursion.torrent
depth: Cell<u16>,
}
#[doc(hidbn)]
impl<'de> Deserializer<'de> {
fn new(input: &'de [u8]) -> Self {
Deserializer {
input,
start_info: std::ptr::null(),
end_info: std::ptr::null(),
info_depth: 0,
depth: Cell::new(0),
}
}
fn peek(&self) -> Option<u8> {
self.input.get(0).copied()
}
fn next(&mut self) -> Result<u8> {
if let Some(c) = self.peek() {
let _ = self.consume();
return Ok(c);
}
Err(DeserializeError::UnexpectedEOF)
}
fn consume(&mut self) -> Result<()> {
self.input = &self.input.get(1..).ok_or(DeserializeError::UnexpectedEOF)?;
Ok(())
}
fn skip(&mut self, n: i64) -> Result<()> {
self.input = &self
.input
.get(n as usize..)
.ok_or(DeserializeError::UnexpectedEOF)?;
Ok(())
}
fn read_integer(&mut self, stop: u8) -> Result<i64> {
let mut n: i64 = 0;
loop {
match self.next()? {
c @ b'0'..=b'9' => n = (n * 10) + (c - b'0') as i64,
c if c == stop => break,
c => return Err(DeserializeError::WrongCharacter(c)),
}
}
Ok(n)
}
fn read_number(&mut self) -> Result<i64> {
self.consume()?; // 'i'
let negative = match self.peek() {
Some(b'-') => {
self.consume()?;
true
}
_ => false,
};
let n = self.read_integer(b'e')?;
Ok(if negative { -n } else { n })
}
fn read_string(&mut self) -> Result<&'de [u8]> {
let len = self.read_integer(b':')?;
let s = self
.input
.get(..len as usize)
.ok_or(DeserializeError::UnexpectedEOF)?;
self.skip(len)?;
if s == b"info" {
//println!("INFO FOUND: {:?}", String::from_utf8(s.to_vec()));
self.start_info = self.input.as_ptr();
self.info_depth = 1;
}
//println!("STRING={:?}", String::from_utf8((&s[0..std::cmp::min(100, s.len())]).to_vec()));
Ok(s)
}
}
#[doc(hidden)]
impl<'a, 'de> serde::de::Deserializer<'de> for &'a mut Deserializer<'de> {
type Error = DeserializeError;
fn deserialize_any<V>(self, visitor: V) -> Result<V::Value>
where
V: Visitor<'de>,
{
// println!("NEXT: {:?}", self.peek());
match self.peek().ok_or(DeserializeError::UnexpectedEOF)? {
b'i' => {
// println!("FOUND NUMBER", );
visitor.visit_i64(self.read_number()?)
}
b'l' => {
self.consume()?;
// println!("FOUND LIST {:?}", &self.input[..10]);
visitor.visit_seq(BencAccess::new(self))
}
b'd' => {
let depth = self.depth.get();
if depth > 100 {
return Err(DeserializeError::TooDeep);
}
self.depth.set(depth + 1);
// println!("FOUND DICT {}", self.depth.get());
self.consume()?;
visitor.visit_map(BencAccess::new(self))
}
_n @ b'0'..=b'9' => {
// println!("FOUND STRING", );
visitor.visit_borrowed_bytes(self.read_string()?)
}
c => Err(DeserializeError::WrongCharacter(c)),
}
}
fn deserialize_option<V>(self, visitor: V) -> Result<V::Value>
where
V: Visitor<'de>,
{
visitor.visit_some(self)
}
forward_to_deserialize_any! {
bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str string
unit unit_struct seq tuple tuple_struct map struct identifier
newtype_struct ignored_any enum bytes byte_buf
}
}
struct BencAccess<'a, 'de> {
de: &'a mut Deserializer<'de>,
}
impl<'a, 'de> BencAccess<'a, 'de> {
fn new(de: &'a mut Deserializer<'de>) -> BencAccess<'a, 'de> {
if de.info_depth >= 1 {
de.info_depth += 1;
let _s = de.input;
//println!("DEPTH[NEW]={:?} {:?}", de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec()));
}
BencAccess { de }
}
}
impl<'a, 'de> MapAccess<'de> for BencAccess<'a, 'de> {
type Error = DeserializeError;
fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>>
where
K: DeserializeSeed<'de>,
{
if self.de.peek() == Some(b'e') {
let _ = self.de.consume();
self.de.info_depth -= 1;
if self.de.info_depth == 1 {
//println!("FOUND END!");
self.de.end_info = self.de.input.as_ptr();
}
let _s = self.de.input;
//println!("DEPTH[END_DICT]={:?} {:?}", self.de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec()));
return Ok(None);
}
seed.deserialize(&mut *self.de).map(Some)
}
fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value>
where
V: DeserializeSeed<'de>,
{
seed.deserialize(&mut *self.de)
}
}
impl<'a, 'de> SeqAccess<'de> for BencAccess<'a, 'de> {
type Error = DeserializeError;
fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>>
where
T: DeserializeSeed<'de>,
{
// println!("LAAA {:?}", &self.de.input[..5]);
if self.de.peek() == Some(b'e') {
let _ = self.de.consume();
if self.de.info_depth >= 1 {
self.de.info_depth -= 1;
}
let _s = self.de.input;
//println!("DEPTH[END_LIST]={:?} {:?}", self.de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec()));
// println!("DEPTH={}", self.de.info_depth);
return Ok(None);
}
seed.deserialize(&mut *self.de).map(Some)
}
}
#[allow(non_snake_case)]
#[cfg(test)]
mod tests {
use super::{from_bytes, DeserializeError, Result};
use serde::Deserialize;
#[test]
fn test_dict()
|
}
#[test]
fn test_key_no_value() {
#[derive(Deserialize, PartialEq, Debug)]
struct Dict<'b> {
a: i64,
b: &'b str,
}
let res: Result<Dict> = from_bytes(b"d1:ai1e1:be");
println!("{:?}", res);
assert_eq!(res, Err(DeserializeError::WrongCharacter(101)));
}
#[test]
fn test_key_not_string() {
#[derive(Deserialize, Debug)]
struct Dict<'b> {
a: i64,
b: &'b str,
}
let res: Result<Dict> = from_bytes(b"di5e1:ae");
println!("{:?}", res);
assert!(res.is_err());
}
// TODO: Add more tests from
// https://github.com/arvidn/libtorrent/blob/RC_1_2/test/test_bdecode.cpp
}
|
{
#[derive(Deserialize, PartialEq, Debug)]
struct Dict<'b> {
a: i64,
b: &'b str,
c: &'b str,
X: &'b str,
}
let bc: Dict = from_bytes(b"d1:ai12453e1:b3:aaa1:c3:bbb1:X10:0123456789e").unwrap();
assert_eq!(
bc,
Dict {
a: 12453,
b: "aaa",
c: "bbb",
X: "0123456789",
}
);
|
identifier_body
|
de.rs
|
use serde::{
de::{DeserializeSeed, MapAccess, SeqAccess, Visitor},
forward_to_deserialize_any,
};
use serde::Deserialize;
use std::{cell::Cell, fmt};
#[derive(Debug, PartialEq)]
pub enum DeserializeError {
UnexpectedEOF,
WrongCharacter(u8),
End,
InfoHashMissing,
TooDeep,
NoFile,
EmptyFile,
UnalignedPieces,
Message(String),
}
type Result<T> = std::result::Result<T, DeserializeError>;
impl serde::de::Error for DeserializeError {
fn custom<T: fmt::Display>(msg: T) -> Self {
DeserializeError::Message(msg.to_string())
}
}
impl fmt::Display for DeserializeError {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str(&format!("{:?}", self))
//formatter.write_str(std::error::Error::description(self))
}
}
impl std::error::Error for DeserializeError {
fn description(&self) -> &str {
"aa"
//self.msg.as_str()
}
}
pub fn from_bytes<'de, T>(s: &'de [u8]) -> Result<T>
where
T: Deserialize<'de>,
{
let mut de: Deserializer = Deserializer::new(s);
T::deserialize(&mut de)
}
pub fn from_bytes_with_hash<'de, T>(s: &'de [u8]) -> Result<(T, Vec<u8>)>
where
T: Deserialize<'de>,
{
let mut de: Deserializer = Deserializer::new(s);
let res = T::deserialize(&mut de)?;
let info_hash = if!de.start_info.is_null() && de.end_info > de.start_info {
let len = de.end_info as usize - de.start_info as usize;
let slice = unsafe { std::slice::from_raw_parts(de.start_info, len) };
sha1::Sha1::from(&slice[..]).digest().bytes().to_vec()
} else {
//eprintln!("START={:?} END={:?}", de.start_info, de.end_info);
return Err(DeserializeError::InfoHashMissing);
};
Ok((res, info_hash))
}
use crate::metadata::{InfoFile, MetaTorrent, Torrent};
pub fn read_meta(s: &[u8]) -> Result<Torrent> {
let (meta, info_hash): (MetaTorrent, Vec<u8>) = from_bytes_with_hash(s)?;
if meta.info.pieces.len() % 20!= 0 {
return Err(DeserializeError::UnalignedPieces);
}
match &meta.info.files {
InfoFile::Multiple { files,.. } => {
if files.is_empty() {
return Err(DeserializeError::NoFile);
}
}
InfoFile::Single { length,.. } => {
if *length == 0 {
return Err(DeserializeError::EmptyFile);
}
}
}
Ok(Torrent {
meta,
info_hash: info_hash.into(),
})
}
// 4b3ea6a5b1e62537dceb67230248ff092a723e4d
// 4b3ea6a5b1e62537dceb67230248ff092a723e4d
#[doc(hidden)]
pub struct Deserializer<'de> {
input: &'de [u8],
start_info: *const u8,
end_info: *const u8,
info_depth: i64,
// Fix v2_deep_recursion.torrent
depth: Cell<u16>,
}
#[doc(hidbn)]
impl<'de> Deserializer<'de> {
fn new(input: &'de [u8]) -> Self {
Deserializer {
input,
start_info: std::ptr::null(),
end_info: std::ptr::null(),
info_depth: 0,
depth: Cell::new(0),
}
}
fn peek(&self) -> Option<u8> {
self.input.get(0).copied()
}
fn next(&mut self) -> Result<u8> {
if let Some(c) = self.peek() {
let _ = self.consume();
return Ok(c);
}
Err(DeserializeError::UnexpectedEOF)
}
fn consume(&mut self) -> Result<()> {
self.input = &self.input.get(1..).ok_or(DeserializeError::UnexpectedEOF)?;
Ok(())
}
fn skip(&mut self, n: i64) -> Result<()> {
self.input = &self
.input
.get(n as usize..)
.ok_or(DeserializeError::UnexpectedEOF)?;
Ok(())
}
fn read_integer(&mut self, stop: u8) -> Result<i64> {
let mut n: i64 = 0;
loop {
match self.next()? {
c @ b'0'..=b'9' => n = (n * 10) + (c - b'0') as i64,
c if c == stop => break,
c => return Err(DeserializeError::WrongCharacter(c)),
}
}
Ok(n)
}
fn read_number(&mut self) -> Result<i64> {
self.consume()?; // 'i'
let negative = match self.peek() {
Some(b'-') => {
self.consume()?;
true
}
_ => false,
};
let n = self.read_integer(b'e')?;
Ok(if negative { -n } else { n })
}
fn read_string(&mut self) -> Result<&'de [u8]> {
let len = self.read_integer(b':')?;
let s = self
.input
.get(..len as usize)
.ok_or(DeserializeError::UnexpectedEOF)?;
self.skip(len)?;
if s == b"info" {
//println!("INFO FOUND: {:?}", String::from_utf8(s.to_vec()));
self.start_info = self.input.as_ptr();
self.info_depth = 1;
}
//println!("STRING={:?}", String::from_utf8((&s[0..std::cmp::min(100, s.len())]).to_vec()));
Ok(s)
}
}
|
fn deserialize_any<V>(self, visitor: V) -> Result<V::Value>
where
V: Visitor<'de>,
{
// println!("NEXT: {:?}", self.peek());
match self.peek().ok_or(DeserializeError::UnexpectedEOF)? {
b'i' => {
// println!("FOUND NUMBER", );
visitor.visit_i64(self.read_number()?)
}
b'l' => {
self.consume()?;
// println!("FOUND LIST {:?}", &self.input[..10]);
visitor.visit_seq(BencAccess::new(self))
}
b'd' => {
let depth = self.depth.get();
if depth > 100 {
return Err(DeserializeError::TooDeep);
}
self.depth.set(depth + 1);
// println!("FOUND DICT {}", self.depth.get());
self.consume()?;
visitor.visit_map(BencAccess::new(self))
}
_n @ b'0'..=b'9' => {
// println!("FOUND STRING", );
visitor.visit_borrowed_bytes(self.read_string()?)
}
c => Err(DeserializeError::WrongCharacter(c)),
}
}
fn deserialize_option<V>(self, visitor: V) -> Result<V::Value>
where
V: Visitor<'de>,
{
visitor.visit_some(self)
}
forward_to_deserialize_any! {
bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str string
unit unit_struct seq tuple tuple_struct map struct identifier
newtype_struct ignored_any enum bytes byte_buf
}
}
struct BencAccess<'a, 'de> {
de: &'a mut Deserializer<'de>,
}
impl<'a, 'de> BencAccess<'a, 'de> {
fn new(de: &'a mut Deserializer<'de>) -> BencAccess<'a, 'de> {
if de.info_depth >= 1 {
de.info_depth += 1;
let _s = de.input;
//println!("DEPTH[NEW]={:?} {:?}", de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec()));
}
BencAccess { de }
}
}
impl<'a, 'de> MapAccess<'de> for BencAccess<'a, 'de> {
type Error = DeserializeError;
fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>>
where
K: DeserializeSeed<'de>,
{
if self.de.peek() == Some(b'e') {
let _ = self.de.consume();
self.de.info_depth -= 1;
if self.de.info_depth == 1 {
//println!("FOUND END!");
self.de.end_info = self.de.input.as_ptr();
}
let _s = self.de.input;
//println!("DEPTH[END_DICT]={:?} {:?}", self.de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec()));
return Ok(None);
}
seed.deserialize(&mut *self.de).map(Some)
}
fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value>
where
V: DeserializeSeed<'de>,
{
seed.deserialize(&mut *self.de)
}
}
impl<'a, 'de> SeqAccess<'de> for BencAccess<'a, 'de> {
type Error = DeserializeError;
fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>>
where
T: DeserializeSeed<'de>,
{
// println!("LAAA {:?}", &self.de.input[..5]);
if self.de.peek() == Some(b'e') {
let _ = self.de.consume();
if self.de.info_depth >= 1 {
self.de.info_depth -= 1;
}
let _s = self.de.input;
//println!("DEPTH[END_LIST]={:?} {:?}", self.de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec()));
// println!("DEPTH={}", self.de.info_depth);
return Ok(None);
}
seed.deserialize(&mut *self.de).map(Some)
}
}
#[allow(non_snake_case)]
#[cfg(test)]
mod tests {
use super::{from_bytes, DeserializeError, Result};
use serde::Deserialize;
#[test]
fn test_dict() {
#[derive(Deserialize, PartialEq, Debug)]
struct Dict<'b> {
a: i64,
b: &'b str,
c: &'b str,
X: &'b str,
}
let bc: Dict = from_bytes(b"d1:ai12453e1:b3:aaa1:c3:bbb1:X10:0123456789e").unwrap();
assert_eq!(
bc,
Dict {
a: 12453,
b: "aaa",
c: "bbb",
X: "0123456789",
}
);
}
#[test]
fn test_key_no_value() {
#[derive(Deserialize, PartialEq, Debug)]
struct Dict<'b> {
a: i64,
b: &'b str,
}
let res: Result<Dict> = from_bytes(b"d1:ai1e1:be");
println!("{:?}", res);
assert_eq!(res, Err(DeserializeError::WrongCharacter(101)));
}
#[test]
fn test_key_not_string() {
#[derive(Deserialize, Debug)]
struct Dict<'b> {
a: i64,
b: &'b str,
}
let res: Result<Dict> = from_bytes(b"di5e1:ae");
println!("{:?}", res);
assert!(res.is_err());
}
// TODO: Add more tests from
// https://github.com/arvidn/libtorrent/blob/RC_1_2/test/test_bdecode.cpp
}
|
#[doc(hidden)]
impl<'a, 'de> serde::de::Deserializer<'de> for &'a mut Deserializer<'de> {
type Error = DeserializeError;
|
random_line_split
|
main.rs
|
multisample::MultisampleState,
rasterization::RasterizationState,
vertex_input::{Vertex, VertexDefinition},
viewport::{Viewport, ViewportState},
GraphicsPipelineCreateInfo,
},
layout::PipelineDescriptorSetLayoutCreateInfo,
GraphicsPipeline, Pipeline, PipelineBindPoint, PipelineLayout,
PipelineShaderStageCreateInfo,
},
render_pass::{Framebuffer, FramebufferCreateInfo, RenderPass, Subpass},
shader::EntryPoint,
swapchain::{
acquire_next_image, Surface, Swapchain, SwapchainCreateInfo, SwapchainPresentInfo,
},
sync::{self, GpuFuture},
Validated, VulkanError, VulkanLibrary,
};
use winit::{
event::{Event, WindowEvent},
event_loop::{ControlFlow, EventLoop},
window::WindowBuilder,
};
fn main()
|
let device_extensions = DeviceExtensions {
khr_swapchain: true,
..DeviceExtensions::empty()
};
let (physical_device, queue_family_index) = instance
.enumerate_physical_devices()
.unwrap()
.filter(|p| p.supported_extensions().contains(&device_extensions))
.filter_map(|p| {
p.queue_family_properties()
.iter()
.enumerate()
.position(|(i, q)| {
q.queue_flags.intersects(QueueFlags::GRAPHICS)
&& p.surface_support(i as u32, &surface).unwrap_or(false)
})
.map(|i| (p, i as u32))
})
.min_by_key(|(p, _)| match p.properties().device_type {
PhysicalDeviceType::DiscreteGpu => 0,
PhysicalDeviceType::IntegratedGpu => 1,
PhysicalDeviceType::VirtualGpu => 2,
PhysicalDeviceType::Cpu => 3,
PhysicalDeviceType::Other => 4,
_ => 5,
})
.unwrap();
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
physical_device,
DeviceCreateInfo {
enabled_extensions: device_extensions,
queue_create_infos: vec![QueueCreateInfo {
queue_family_index,
..Default::default()
}],
..Default::default()
},
)
.unwrap();
let queue = queues.next().unwrap();
let (mut swapchain, images) = {
let surface_capabilities = device
.physical_device()
.surface_capabilities(&surface, Default::default())
.unwrap();
let image_format = device
.physical_device()
.surface_formats(&surface, Default::default())
.unwrap()[0]
.0;
Swapchain::new(
device.clone(),
surface,
SwapchainCreateInfo {
min_image_count: surface_capabilities.min_image_count.max(2),
image_format,
image_extent: window.inner_size().into(),
image_usage: ImageUsage::COLOR_ATTACHMENT,
composite_alpha: surface_capabilities
.supported_composite_alpha
.into_iter()
.next()
.unwrap(),
..Default::default()
},
)
.unwrap()
};
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
POSITIONS,
)
.unwrap();
let normals_buffer = Buffer::from_iter(
&memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
NORMALS,
)
.unwrap();
let index_buffer = Buffer::from_iter(
&memory_allocator,
BufferCreateInfo {
usage: BufferUsage::INDEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
INDICES,
)
.unwrap();
let uniform_buffer = SubbufferAllocator::new(
memory_allocator.clone(),
SubbufferAllocatorCreateInfo {
buffer_usage: BufferUsage::UNIFORM_BUFFER,
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
);
let render_pass = vulkano::single_pass_renderpass!(
device.clone(),
attachments: {
color: {
format: swapchain.image_format(),
samples: 1,
load_op: Clear,
store_op: Store,
},
depth_stencil: {
format: Format::D16_UNORM,
samples: 1,
load_op: Clear,
store_op: DontCare,
},
},
pass: {
color: [color],
depth_stencil: {depth_stencil},
},
)
.unwrap();
let vs = vs::load(device.clone())
.unwrap()
.entry_point("main")
.unwrap();
let fs = fs::load(device.clone())
.unwrap()
.entry_point("main")
.unwrap();
let (mut pipeline, mut framebuffers) = window_size_dependent_setup(
&memory_allocator,
vs.clone(),
fs.clone(),
&images,
render_pass.clone(),
);
let mut recreate_swapchain = false;
let mut previous_frame_end = Some(sync::now(device.clone()).boxed());
let rotation_start = Instant::now();
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default());
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
*control_flow = ControlFlow::Exit;
}
Event::WindowEvent {
event: WindowEvent::Resized(_),
..
} => {
recreate_swapchain = true;
}
Event::RedrawEventsCleared => {
let image_extent: [u32; 2] = window.inner_size().into();
if image_extent.contains(&0) {
return;
}
previous_frame_end.as_mut().unwrap().cleanup_finished();
if recreate_swapchain {
let (new_swapchain, new_images) = swapchain
.recreate(SwapchainCreateInfo {
image_extent,
..swapchain.create_info()
})
.expect("failed to recreate swapchain");
swapchain = new_swapchain;
let (new_pipeline, new_framebuffers) = window_size_dependent_setup(
&memory_allocator,
vs.clone(),
fs.clone(),
&new_images,
render_pass.clone(),
);
pipeline = new_pipeline;
framebuffers = new_framebuffers;
recreate_swapchain = false;
}
let uniform_buffer_subbuffer = {
let elapsed = rotation_start.elapsed();
let rotation =
elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000.0;
let rotation = Matrix3::from_angle_y(Rad(rotation as f32));
// note: this teapot was meant for OpenGL where the origin is at the lower left
// instead the origin is at the upper left in Vulkan, so we reverse the Y axis
let aspect_ratio =
swapchain.image_extent()[0] as f32 / swapchain.image_extent()[1] as f32;
let proj = cgmath::perspective(
Rad(std::f32::consts::FRAC_PI_2),
aspect_ratio,
0.01,
100.0,
);
let view = Matrix4::look_at_rh(
Point3::new(0.3, 0.3, 1.0),
Point3::new(0.0, 0.0, 0.0),
Vector3::new(0.0, -1.0, 0.0),
);
let scale = Matrix4::from_scale(0.01);
let uniform_data = vs::Data {
world: Matrix4::from(rotation).into(),
view: (view * scale).into(),
proj: proj.into(),
};
let subbuffer = uniform_buffer.allocate_sized().unwrap();
*subbuffer.write().unwrap() = uniform_data;
subbuffer
};
let layout = pipeline.layout().set_layouts().get(0).unwrap();
let set = PersistentDescriptorSet::new(
&descriptor_set_allocator,
layout.clone(),
[WriteDescriptorSet::buffer(0, uniform_buffer_subbuffer)],
[],
)
.unwrap();
let (image_index, suboptimal, acquire_future) =
match acquire_next_image(swapchain.clone(), None).map_err(Validated::unwrap) {
Ok(r) => r,
Err(VulkanError::OutOfDate) => {
recreate_swapchain = true;
return;
}
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
recreate_swapchain = true;
}
let mut builder = AutoCommandBufferBuilder::primary(
&command_buffer_allocator,
queue.queue_family_index(),
CommandBufferUsage::OneTimeSubmit,
)
.unwrap();
builder
.begin_render_pass(
RenderPassBeginInfo {
clear_values: vec![
Some([0.0, 0.0, 1.0, 1.0].into()),
Some(1f32.into()),
],
..RenderPassBeginInfo::framebuffer(
framebuffers[image_index as usize].clone(),
)
},
Default::default(),
)
.unwrap()
.bind_pipeline_graphics(pipeline.clone())
.unwrap()
.bind_descriptor_sets(
PipelineBindPoint::Graphics,
pipeline.layout().clone(),
0,
set,
)
.unwrap()
.bind_vertex_buffers(0, (vertex_buffer.clone(), normals_buffer.clone()))
.unwrap()
.bind_index_buffer(index_buffer.clone())
.unwrap()
.draw_indexed(index_buffer.len() as u32, 1, 0, 0, 0)
.unwrap()
.end_render_pass(Default::default())
.unwrap();
let command_buffer = builder.build().unwrap();
let future = previous_frame_end
.take()
.unwrap()
.join(acquire_future)
.then_execute(queue.clone(), command_buffer)
.unwrap()
.then_swapchain_present(
queue.clone(),
SwapchainPresentInfo::swapchain_image_index(swapchain.clone(), image_index),
)
.then_signal_fence_and_flush();
match future.map_err(Validated::unwrap) {
Ok(future) => {
previous_frame_end = Some(future.boxed());
}
Err(VulkanError::OutOfDate) => {
recreate_swapchain = true;
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("failed to flush future: {e}");
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
}
_ => (),
}
});
}
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
memory_allocator: &StandardMemoryAllocator,
vs: EntryPoint,
fs: EntryPoint,
images: &[Arc<Image>],
render_pass: Arc<RenderPass>,
) -> (Arc<GraphicsPipeline>, Vec<Arc<Framebuffer>>) {
let extent = images[0].extent();
let depth_buffer = ImageView::new_default(
Image::new(
memory_allocator,
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::D16_UNORM,
extent: images[0].extent(),
usage: ImageUsage::DEPTH_STENCIL_ATTACHMENT | ImageUsage::TRANSIENT_ATTACHMENT,
..Default::default()
},
AllocationCreateInfo::default(),
)
.unwrap(),
)
.unwrap();
let framebuffers = images
.iter()
.map(|image| {
let view = ImageView::new_default(image.clone()).unwrap();
Framebuffer::new(
render_pass.clone(),
FramebufferCreateInfo {
attachments: vec![view, depth_buffer.clone()],
..Default::default()
},
)
.unwrap()
})
.collect::<Vec<_>>();
// In the triangle example
|
{
// The start of this example is exactly the same as `triangle`. You should read the `triangle`
// example if you haven't done so yet.
let event_loop = EventLoop::new();
let library = VulkanLibrary::new().unwrap();
let required_extensions = Surface::required_extensions(&event_loop);
let instance = Instance::new(
library,
InstanceCreateInfo {
flags: InstanceCreateFlags::ENUMERATE_PORTABILITY,
enabled_extensions: required_extensions,
..Default::default()
},
)
.unwrap();
let window = Arc::new(WindowBuilder::new().build(&event_loop).unwrap());
let surface = Surface::from_window(instance.clone(), window.clone()).unwrap();
|
identifier_body
|
main.rs
|
multisample::MultisampleState,
rasterization::RasterizationState,
vertex_input::{Vertex, VertexDefinition},
viewport::{Viewport, ViewportState},
GraphicsPipelineCreateInfo,
},
layout::PipelineDescriptorSetLayoutCreateInfo,
GraphicsPipeline, Pipeline, PipelineBindPoint, PipelineLayout,
PipelineShaderStageCreateInfo,
},
render_pass::{Framebuffer, FramebufferCreateInfo, RenderPass, Subpass},
shader::EntryPoint,
swapchain::{
acquire_next_image, Surface, Swapchain, SwapchainCreateInfo, SwapchainPresentInfo,
},
sync::{self, GpuFuture},
Validated, VulkanError, VulkanLibrary,
};
use winit::{
event::{Event, WindowEvent},
event_loop::{ControlFlow, EventLoop},
window::WindowBuilder,
};
fn
|
() {
// The start of this example is exactly the same as `triangle`. You should read the `triangle`
// example if you haven't done so yet.
let event_loop = EventLoop::new();
let library = VulkanLibrary::new().unwrap();
let required_extensions = Surface::required_extensions(&event_loop);
let instance = Instance::new(
library,
InstanceCreateInfo {
flags: InstanceCreateFlags::ENUMERATE_PORTABILITY,
enabled_extensions: required_extensions,
..Default::default()
},
)
.unwrap();
let window = Arc::new(WindowBuilder::new().build(&event_loop).unwrap());
let surface = Surface::from_window(instance.clone(), window.clone()).unwrap();
let device_extensions = DeviceExtensions {
khr_swapchain: true,
..DeviceExtensions::empty()
};
let (physical_device, queue_family_index) = instance
.enumerate_physical_devices()
.unwrap()
.filter(|p| p.supported_extensions().contains(&device_extensions))
.filter_map(|p| {
p.queue_family_properties()
.iter()
.enumerate()
.position(|(i, q)| {
q.queue_flags.intersects(QueueFlags::GRAPHICS)
&& p.surface_support(i as u32, &surface).unwrap_or(false)
})
.map(|i| (p, i as u32))
})
.min_by_key(|(p, _)| match p.properties().device_type {
PhysicalDeviceType::DiscreteGpu => 0,
PhysicalDeviceType::IntegratedGpu => 1,
PhysicalDeviceType::VirtualGpu => 2,
PhysicalDeviceType::Cpu => 3,
PhysicalDeviceType::Other => 4,
_ => 5,
})
.unwrap();
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
physical_device,
DeviceCreateInfo {
enabled_extensions: device_extensions,
queue_create_infos: vec![QueueCreateInfo {
queue_family_index,
..Default::default()
}],
..Default::default()
},
)
.unwrap();
let queue = queues.next().unwrap();
let (mut swapchain, images) = {
let surface_capabilities = device
.physical_device()
.surface_capabilities(&surface, Default::default())
.unwrap();
let image_format = device
.physical_device()
.surface_formats(&surface, Default::default())
.unwrap()[0]
.0;
Swapchain::new(
device.clone(),
surface,
SwapchainCreateInfo {
min_image_count: surface_capabilities.min_image_count.max(2),
image_format,
image_extent: window.inner_size().into(),
image_usage: ImageUsage::COLOR_ATTACHMENT,
composite_alpha: surface_capabilities
.supported_composite_alpha
.into_iter()
.next()
.unwrap(),
..Default::default()
},
)
.unwrap()
};
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
POSITIONS,
)
.unwrap();
let normals_buffer = Buffer::from_iter(
&memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
NORMALS,
)
.unwrap();
let index_buffer = Buffer::from_iter(
&memory_allocator,
BufferCreateInfo {
usage: BufferUsage::INDEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
INDICES,
)
.unwrap();
let uniform_buffer = SubbufferAllocator::new(
memory_allocator.clone(),
SubbufferAllocatorCreateInfo {
buffer_usage: BufferUsage::UNIFORM_BUFFER,
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
);
let render_pass = vulkano::single_pass_renderpass!(
device.clone(),
attachments: {
color: {
format: swapchain.image_format(),
samples: 1,
load_op: Clear,
store_op: Store,
},
depth_stencil: {
format: Format::D16_UNORM,
samples: 1,
load_op: Clear,
store_op: DontCare,
},
},
pass: {
color: [color],
depth_stencil: {depth_stencil},
},
)
.unwrap();
let vs = vs::load(device.clone())
.unwrap()
.entry_point("main")
.unwrap();
let fs = fs::load(device.clone())
.unwrap()
.entry_point("main")
.unwrap();
let (mut pipeline, mut framebuffers) = window_size_dependent_setup(
&memory_allocator,
vs.clone(),
fs.clone(),
&images,
render_pass.clone(),
);
let mut recreate_swapchain = false;
let mut previous_frame_end = Some(sync::now(device.clone()).boxed());
let rotation_start = Instant::now();
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default());
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
*control_flow = ControlFlow::Exit;
}
Event::WindowEvent {
event: WindowEvent::Resized(_),
..
} => {
recreate_swapchain = true;
}
Event::RedrawEventsCleared => {
let image_extent: [u32; 2] = window.inner_size().into();
if image_extent.contains(&0) {
return;
}
previous_frame_end.as_mut().unwrap().cleanup_finished();
if recreate_swapchain {
let (new_swapchain, new_images) = swapchain
.recreate(SwapchainCreateInfo {
image_extent,
..swapchain.create_info()
})
.expect("failed to recreate swapchain");
swapchain = new_swapchain;
let (new_pipeline, new_framebuffers) = window_size_dependent_setup(
&memory_allocator,
vs.clone(),
fs.clone(),
&new_images,
render_pass.clone(),
);
pipeline = new_pipeline;
framebuffers = new_framebuffers;
recreate_swapchain = false;
}
let uniform_buffer_subbuffer = {
let elapsed = rotation_start.elapsed();
let rotation =
elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000.0;
let rotation = Matrix3::from_angle_y(Rad(rotation as f32));
// note: this teapot was meant for OpenGL where the origin is at the lower left
// instead the origin is at the upper left in Vulkan, so we reverse the Y axis
let aspect_ratio =
swapchain.image_extent()[0] as f32 / swapchain.image_extent()[1] as f32;
let proj = cgmath::perspective(
Rad(std::f32::consts::FRAC_PI_2),
aspect_ratio,
0.01,
100.0,
);
let view = Matrix4::look_at_rh(
Point3::new(0.3, 0.3, 1.0),
Point3::new(0.0, 0.0, 0.0),
Vector3::new(0.0, -1.0, 0.0),
);
let scale = Matrix4::from_scale(0.01);
let uniform_data = vs::Data {
world: Matrix4::from(rotation).into(),
view: (view * scale).into(),
proj: proj.into(),
};
let subbuffer = uniform_buffer.allocate_sized().unwrap();
*subbuffer.write().unwrap() = uniform_data;
subbuffer
};
let layout = pipeline.layout().set_layouts().get(0).unwrap();
let set = PersistentDescriptorSet::new(
&descriptor_set_allocator,
layout.clone(),
[WriteDescriptorSet::buffer(0, uniform_buffer_subbuffer)],
[],
)
.unwrap();
let (image_index, suboptimal, acquire_future) =
match acquire_next_image(swapchain.clone(), None).map_err(Validated::unwrap) {
Ok(r) => r,
Err(VulkanError::OutOfDate) => {
recreate_swapchain = true;
return;
}
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
recreate_swapchain = true;
}
let mut builder = AutoCommandBufferBuilder::primary(
&command_buffer_allocator,
queue.queue_family_index(),
CommandBufferUsage::OneTimeSubmit,
)
.unwrap();
builder
.begin_render_pass(
RenderPassBeginInfo {
clear_values: vec![
Some([0.0, 0.0, 1.0, 1.0].into()),
Some(1f32.into()),
],
..RenderPassBeginInfo::framebuffer(
framebuffers[image_index as usize].clone(),
)
},
Default::default(),
)
.unwrap()
.bind_pipeline_graphics(pipeline.clone())
.unwrap()
.bind_descriptor_sets(
PipelineBindPoint::Graphics,
pipeline.layout().clone(),
0,
set,
)
.unwrap()
.bind_vertex_buffers(0, (vertex_buffer.clone(), normals_buffer.clone()))
.unwrap()
.bind_index_buffer(index_buffer.clone())
.unwrap()
.draw_indexed(index_buffer.len() as u32, 1, 0, 0, 0)
.unwrap()
.end_render_pass(Default::default())
.unwrap();
let command_buffer = builder.build().unwrap();
let future = previous_frame_end
.take()
.unwrap()
.join(acquire_future)
.then_execute(queue.clone(), command_buffer)
.unwrap()
.then_swapchain_present(
queue.clone(),
SwapchainPresentInfo::swapchain_image_index(swapchain.clone(), image_index),
)
.then_signal_fence_and_flush();
match future.map_err(Validated::unwrap) {
Ok(future) => {
previous_frame_end = Some(future.boxed());
}
Err(VulkanError::OutOfDate) => {
recreate_swapchain = true;
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("failed to flush future: {e}");
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
}
_ => (),
}
});
}
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
memory_allocator: &StandardMemoryAllocator,
vs: EntryPoint,
fs: EntryPoint,
images: &[Arc<Image>],
render_pass: Arc<RenderPass>,
) -> (Arc<GraphicsPipeline>, Vec<Arc<Framebuffer>>) {
let extent = images[0].extent();
let depth_buffer = ImageView::new_default(
Image::new(
memory_allocator,
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::D16_UNORM,
extent: images[0].extent(),
usage: ImageUsage::DEPTH_STENCIL_ATTACHMENT | ImageUsage::TRANSIENT_ATTACHMENT,
..Default::default()
},
AllocationCreateInfo::default(),
)
.unwrap(),
)
.unwrap();
let framebuffers = images
.iter()
.map(|image| {
let view = ImageView::new_default(image.clone()).unwrap();
Framebuffer::new(
render_pass.clone(),
FramebufferCreateInfo {
attachments: vec![view, depth_buffer.clone()],
..Default::default()
},
)
.unwrap()
})
.collect::<Vec<_>>();
// In the triangle example
|
main
|
identifier_name
|
main.rs
|
multisample::MultisampleState,
rasterization::RasterizationState,
vertex_input::{Vertex, VertexDefinition},
viewport::{Viewport, ViewportState},
GraphicsPipelineCreateInfo,
},
layout::PipelineDescriptorSetLayoutCreateInfo,
GraphicsPipeline, Pipeline, PipelineBindPoint, PipelineLayout,
PipelineShaderStageCreateInfo,
},
render_pass::{Framebuffer, FramebufferCreateInfo, RenderPass, Subpass},
shader::EntryPoint,
swapchain::{
acquire_next_image, Surface, Swapchain, SwapchainCreateInfo, SwapchainPresentInfo,
},
sync::{self, GpuFuture},
Validated, VulkanError, VulkanLibrary,
};
use winit::{
event::{Event, WindowEvent},
event_loop::{ControlFlow, EventLoop},
window::WindowBuilder,
};
fn main() {
// The start of this example is exactly the same as `triangle`. You should read the `triangle`
// example if you haven't done so yet.
let event_loop = EventLoop::new();
let library = VulkanLibrary::new().unwrap();
let required_extensions = Surface::required_extensions(&event_loop);
let instance = Instance::new(
library,
InstanceCreateInfo {
flags: InstanceCreateFlags::ENUMERATE_PORTABILITY,
enabled_extensions: required_extensions,
..Default::default()
},
)
.unwrap();
let window = Arc::new(WindowBuilder::new().build(&event_loop).unwrap());
let surface = Surface::from_window(instance.clone(), window.clone()).unwrap();
let device_extensions = DeviceExtensions {
khr_swapchain: true,
..DeviceExtensions::empty()
};
let (physical_device, queue_family_index) = instance
.enumerate_physical_devices()
.unwrap()
.filter(|p| p.supported_extensions().contains(&device_extensions))
.filter_map(|p| {
p.queue_family_properties()
.iter()
.enumerate()
.position(|(i, q)| {
q.queue_flags.intersects(QueueFlags::GRAPHICS)
&& p.surface_support(i as u32, &surface).unwrap_or(false)
})
.map(|i| (p, i as u32))
})
.min_by_key(|(p, _)| match p.properties().device_type {
PhysicalDeviceType::DiscreteGpu => 0,
PhysicalDeviceType::IntegratedGpu => 1,
PhysicalDeviceType::VirtualGpu => 2,
PhysicalDeviceType::Cpu => 3,
PhysicalDeviceType::Other => 4,
_ => 5,
})
.unwrap();
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
physical_device,
DeviceCreateInfo {
enabled_extensions: device_extensions,
queue_create_infos: vec![QueueCreateInfo {
queue_family_index,
..Default::default()
}],
..Default::default()
},
)
.unwrap();
let queue = queues.next().unwrap();
let (mut swapchain, images) = {
let surface_capabilities = device
.physical_device()
.surface_capabilities(&surface, Default::default())
.unwrap();
let image_format = device
.physical_device()
.surface_formats(&surface, Default::default())
.unwrap()[0]
.0;
Swapchain::new(
device.clone(),
surface,
SwapchainCreateInfo {
min_image_count: surface_capabilities.min_image_count.max(2),
image_format,
image_extent: window.inner_size().into(),
image_usage: ImageUsage::COLOR_ATTACHMENT,
composite_alpha: surface_capabilities
.supported_composite_alpha
.into_iter()
.next()
.unwrap(),
..Default::default()
},
)
.unwrap()
};
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
POSITIONS,
)
.unwrap();
let normals_buffer = Buffer::from_iter(
&memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
NORMALS,
)
.unwrap();
let index_buffer = Buffer::from_iter(
&memory_allocator,
BufferCreateInfo {
usage: BufferUsage::INDEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
INDICES,
)
.unwrap();
let uniform_buffer = SubbufferAllocator::new(
memory_allocator.clone(),
SubbufferAllocatorCreateInfo {
buffer_usage: BufferUsage::UNIFORM_BUFFER,
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
);
let render_pass = vulkano::single_pass_renderpass!(
device.clone(),
attachments: {
color: {
format: swapchain.image_format(),
samples: 1,
load_op: Clear,
store_op: Store,
},
depth_stencil: {
format: Format::D16_UNORM,
samples: 1,
load_op: Clear,
store_op: DontCare,
},
},
pass: {
color: [color],
depth_stencil: {depth_stencil},
},
)
.unwrap();
let vs = vs::load(device.clone())
.unwrap()
.entry_point("main")
.unwrap();
let fs = fs::load(device.clone())
.unwrap()
.entry_point("main")
.unwrap();
let (mut pipeline, mut framebuffers) = window_size_dependent_setup(
&memory_allocator,
vs.clone(),
fs.clone(),
&images,
render_pass.clone(),
);
let mut recreate_swapchain = false;
let mut previous_frame_end = Some(sync::now(device.clone()).boxed());
let rotation_start = Instant::now();
|
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
*control_flow = ControlFlow::Exit;
}
Event::WindowEvent {
event: WindowEvent::Resized(_),
..
} => {
recreate_swapchain = true;
}
Event::RedrawEventsCleared => {
let image_extent: [u32; 2] = window.inner_size().into();
if image_extent.contains(&0) {
return;
}
previous_frame_end.as_mut().unwrap().cleanup_finished();
if recreate_swapchain {
let (new_swapchain, new_images) = swapchain
.recreate(SwapchainCreateInfo {
image_extent,
..swapchain.create_info()
})
.expect("failed to recreate swapchain");
swapchain = new_swapchain;
let (new_pipeline, new_framebuffers) = window_size_dependent_setup(
&memory_allocator,
vs.clone(),
fs.clone(),
&new_images,
render_pass.clone(),
);
pipeline = new_pipeline;
framebuffers = new_framebuffers;
recreate_swapchain = false;
}
let uniform_buffer_subbuffer = {
let elapsed = rotation_start.elapsed();
let rotation =
elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000.0;
let rotation = Matrix3::from_angle_y(Rad(rotation as f32));
// note: this teapot was meant for OpenGL where the origin is at the lower left
// instead the origin is at the upper left in Vulkan, so we reverse the Y axis
let aspect_ratio =
swapchain.image_extent()[0] as f32 / swapchain.image_extent()[1] as f32;
let proj = cgmath::perspective(
Rad(std::f32::consts::FRAC_PI_2),
aspect_ratio,
0.01,
100.0,
);
let view = Matrix4::look_at_rh(
Point3::new(0.3, 0.3, 1.0),
Point3::new(0.0, 0.0, 0.0),
Vector3::new(0.0, -1.0, 0.0),
);
let scale = Matrix4::from_scale(0.01);
let uniform_data = vs::Data {
world: Matrix4::from(rotation).into(),
view: (view * scale).into(),
proj: proj.into(),
};
let subbuffer = uniform_buffer.allocate_sized().unwrap();
*subbuffer.write().unwrap() = uniform_data;
subbuffer
};
let layout = pipeline.layout().set_layouts().get(0).unwrap();
let set = PersistentDescriptorSet::new(
&descriptor_set_allocator,
layout.clone(),
[WriteDescriptorSet::buffer(0, uniform_buffer_subbuffer)],
[],
)
.unwrap();
let (image_index, suboptimal, acquire_future) =
match acquire_next_image(swapchain.clone(), None).map_err(Validated::unwrap) {
Ok(r) => r,
Err(VulkanError::OutOfDate) => {
recreate_swapchain = true;
return;
}
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
recreate_swapchain = true;
}
let mut builder = AutoCommandBufferBuilder::primary(
&command_buffer_allocator,
queue.queue_family_index(),
CommandBufferUsage::OneTimeSubmit,
)
.unwrap();
builder
.begin_render_pass(
RenderPassBeginInfo {
clear_values: vec![
Some([0.0, 0.0, 1.0, 1.0].into()),
Some(1f32.into()),
],
..RenderPassBeginInfo::framebuffer(
framebuffers[image_index as usize].clone(),
)
},
Default::default(),
)
.unwrap()
.bind_pipeline_graphics(pipeline.clone())
.unwrap()
.bind_descriptor_sets(
PipelineBindPoint::Graphics,
pipeline.layout().clone(),
0,
set,
)
.unwrap()
.bind_vertex_buffers(0, (vertex_buffer.clone(), normals_buffer.clone()))
.unwrap()
.bind_index_buffer(index_buffer.clone())
.unwrap()
.draw_indexed(index_buffer.len() as u32, 1, 0, 0, 0)
.unwrap()
.end_render_pass(Default::default())
.unwrap();
let command_buffer = builder.build().unwrap();
let future = previous_frame_end
.take()
.unwrap()
.join(acquire_future)
.then_execute(queue.clone(), command_buffer)
.unwrap()
.then_swapchain_present(
queue.clone(),
SwapchainPresentInfo::swapchain_image_index(swapchain.clone(), image_index),
)
.then_signal_fence_and_flush();
match future.map_err(Validated::unwrap) {
Ok(future) => {
previous_frame_end = Some(future.boxed());
}
Err(VulkanError::OutOfDate) => {
recreate_swapchain = true;
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("failed to flush future: {e}");
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
}
_ => (),
}
});
}
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
memory_allocator: &StandardMemoryAllocator,
vs: EntryPoint,
fs: EntryPoint,
images: &[Arc<Image>],
render_pass: Arc<RenderPass>,
) -> (Arc<GraphicsPipeline>, Vec<Arc<Framebuffer>>) {
let extent = images[0].extent();
let depth_buffer = ImageView::new_default(
Image::new(
memory_allocator,
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::D16_UNORM,
extent: images[0].extent(),
usage: ImageUsage::DEPTH_STENCIL_ATTACHMENT | ImageUsage::TRANSIENT_ATTACHMENT,
..Default::default()
},
AllocationCreateInfo::default(),
)
.unwrap(),
)
.unwrap();
let framebuffers = images
.iter()
.map(|image| {
let view = ImageView::new_default(image.clone()).unwrap();
Framebuffer::new(
render_pass.clone(),
FramebufferCreateInfo {
attachments: vec![view, depth_buffer.clone()],
..Default::default()
},
)
.unwrap()
})
.collect::<Vec<_>>();
// In the triangle example we use
|
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default());
|
random_line_split
|
fmlrc2.rs
|
extern crate clap;
extern crate env_logger;
extern crate exitcode;
extern crate log;
extern crate needletail;
use clap::{Arg, App, value_t, values_t};
use log::{info, error};
use needletail::parse_fastx_file;
use std::fs::File;
use std::sync::{Arc, mpsc};
use threadpool::ThreadPool;
use fmlrc::bv_bwt::BitVectorBWT;
use fmlrc::ordered_fasta_writer::OrderedFastaWriter;
use fmlrc::read_correction::{CorrectionParameters, CorrectionResults, LongReadFA, correction_job};
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
fn
|
() {
//initialize logging for our benefit later
env_logger::from_env(env_logger::Env::default().default_filter_or("info")).init();
//non-cli parameters
const JOB_SLOTS: u64 = 10000;
const UPDATE_INTERVAL: u64 = 10000;
//this is the CLI block, params that get populated appear before
let bwt_fn: String;
let long_read_fn: String;
let corrected_read_fn: String;
let mut kmer_sizes: Vec<usize> = vec![21, 59];
let mut threads: usize = 1;
let mut begin_id: u64 = 0;
let mut end_id: u64 = 0xFFFFFFFFFFFFFFFF;
let mut min_count: u64 = 5;
let mut min_frac: f64 = 0.1;
let mut branch_factor: f64 = 4.0;
let mut cache_size: usize = 8;
let verbose_mode: bool;
let matches = App::new("FMLRC2")
.version(VERSION.unwrap_or("?"))
.author("J. Matthew Holt <[email protected]>")
.about("FM-index Long Read Corrector - Rust implementation")
.arg(Arg::with_name("verbose_mode")
.short("v")
.long("verbose")
.help("enable verbose output"))
.arg(Arg::with_name("kmer_sizes")
.short("k")
.long("K")
.multiple(true)
.takes_value(true)
.help("k-mer sizes for correction, can be specified multiple times (default: \"-k 21 59\")"))
.arg(Arg::with_name("threads")
.short("t")
.long("threads")
.takes_value(true)
.help("number of correction threads (default: 1)"))
.arg(Arg::with_name("begin_id")
.short("b")
.long("begin_index")
.takes_value(true)
.help("index of read to start with (default: 0)"))
.arg(Arg::with_name("end_id")
.short("e")
.long("end_index")
.takes_value(true)
.help("index of read to end with (default: end of file)"))
.arg(Arg::with_name("min_count")
.short("m")
.long("min_count")
.takes_value(true)
.help("absolute minimum k-mer count to consisder a path (default: 5)"))
.arg(Arg::with_name("min_frac")
.short("f")
.long("min_dynamic_count")
.takes_value(true)
.help("dynamic minimum k-mer count fraction of median to consider a path (default: 0.1)"))
.arg(Arg::with_name("branch_factor")
.short("B")
.long("branch_factor")
.takes_value(true)
.help("branching factor for correction, scaled by k (default: 4.0)"))
.arg(Arg::with_name("cache_size")
.short("C")
.long("cache_size")
.takes_value(true)
.help("the length of k-mer to precompute in cache (default: 8)"))
.arg(Arg::with_name("COMP_MSBWT.NPY")
.help("The compressed BWT file with high accuracy reads")
.required(true)
.index(1))
.arg(Arg::with_name("LONG_READS.FA")
.help("The FASTX file with uncorrected reads")
.required(true)
.index(2))
.arg(Arg::with_name("CORRECTED_READS.FA")
.help("The FASTA file to write corrected reads to")
.required(true)
.index(3))
.get_matches();
//pull out required values
bwt_fn = matches.value_of("COMP_MSBWT.NPY").unwrap().to_string();
long_read_fn = matches.value_of("LONG_READS.FA").unwrap().to_string();
corrected_read_fn = matches.value_of("CORRECTED_READS.FA").unwrap().to_string();
//now check options
verbose_mode = matches.is_present("verbose_mode");
kmer_sizes = values_t!(matches.values_of("kmer_sizes"), usize).unwrap_or(kmer_sizes);
threads = value_t!(matches.value_of("threads"), usize).unwrap_or(threads);
begin_id = value_t!(matches.value_of("begin_id"), u64).unwrap_or(begin_id);
end_id = value_t!(matches.value_of("end_id"), u64).unwrap_or(end_id);
min_count = value_t!(matches.value_of("min_count"), u64).unwrap_or(min_count);
min_frac = value_t!(matches.value_of("min_frac"), f64).unwrap_or(min_frac);
branch_factor = value_t!(matches.value_of("branch_factor"), f64).unwrap_or(branch_factor);
cache_size = value_t!(matches.value_of("cache_size"), usize).unwrap_or(cache_size);
info!("Input parameters (required):");
info!("\tBWT: \"{}\"", bwt_fn);
match File::open(&bwt_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to open BWT file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
info!("\tInput reads: \"{}\"", long_read_fn);
match File::open(&long_read_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to open input reads file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
info!("\tOutput corrected reads: \"{}\"", corrected_read_fn);
let write_file: File = match File::create(&corrected_read_fn) {
Ok(file) => file,
Err(e) => {
error!("Failed to create output corrected reads file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
let mut fasta_writer = OrderedFastaWriter::new(&write_file);
info!("Execution Parameters:");
info!("\tverbose: {}", verbose_mode);
info!("\tthreads: {}", threads);
info!("\tcache size: {}", cache_size);
info!("Correction Parameters:");
info!("\treads to correct: [{}, {})", begin_id, end_id);
if begin_id > end_id {
error!("--begin_index set to value larger than --end_index");
std::process::exit(exitcode::DATAERR);
}
kmer_sizes.sort_unstable();
info!("\tk-mer sizes: {:?}", kmer_sizes);
info!("\tabs. mininimum count: {}", min_count);
info!("\tdyn. minimimum fraction: {}", min_frac);
if!(0.0..=1.0).contains(&min_frac) {
error!("--min_dynamic_count must be within the range [0, 1]");
std::process::exit(exitcode::DATAERR);
}
info!("\tbranching factor: {}", branch_factor);
if branch_factor <= 0.0 {
error!("--branch_factor must be greater than 0.0");
std::process::exit(exitcode::DATAERR);
}
//TODO make some of these hard-coded into params?
let my_params: CorrectionParameters = CorrectionParameters {
kmer_sizes,
min_count,
max_branch_attempt_length: 10000,
branch_limit_factor: branch_factor,
branch_buffer_factor: 1.3,
//TODO: make this 0.4 a CLI param? we did a prelim test with 0.3 and prec/recall went up but total indels/subs went up *shrug*
midpoint_ed_factor: 0.4,
tail_buffer_factor: 1.05,
frac: min_frac,
verbose: verbose_mode
};
let arc_params: Arc<CorrectionParameters> = Arc::new(my_params);
//first load the BWT into memory
let mut bwt: BitVectorBWT = BitVectorBWT::with_cache_size(cache_size);
match bwt.load_numpy_file(&bwt_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to load BWT file: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
let arc_bwt: Arc<BitVectorBWT> = Arc::new(bwt);
//we need to set up the multiprocessing components now
let pool = ThreadPool::new(threads);
let (tx, rx) = mpsc::channel();
//now needletail open the reads to correct
let mut read_index: u64 = 0;
let mut jobs_queued: u64 = 0;
let mut results_received: u64 = 0;
info!("Starting read correction processes...");
match parse_fastx_file(&long_read_fn) {
Ok(mut fastx_reader) => {
while let Some(raw_record) = fastx_reader.next() {
let record = match raw_record {
Ok(record) => { record },
Err(e) => {
error!("Invalid record while parsing long read file: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
if read_index >= begin_id && read_index < end_id {
//if we've filled our queue, then we should wait until we get some results back
if jobs_queued - results_received >= JOB_SLOTS {
let rx_value: CorrectionResults = rx.recv().unwrap();
if verbose_mode {
info!("Job #{:?}: {:.2} -> {:.2}", rx_value.read_index, rx_value.avg_before, rx_value.avg_after);
}
match fasta_writer.write_correction(rx_value) {
Ok(()) => {},
Err(e) => {
error!("Failed while writing read correction: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
results_received += 1;
if results_received % UPDATE_INTERVAL == 0 {
info!("Processed {} reads...", results_received);
}
}
//clone the transmit channel and submit the pool job
let tx = tx.clone();
let arc_bwt = arc_bwt.clone();
let arc_params = arc_params.clone();
let read_data: LongReadFA = LongReadFA {
read_index: jobs_queued,
label: String::from_utf8(record.id().to_vec()).unwrap(),
seq: String::from_utf8(record.seq().to_vec()).unwrap()
};
//println!("Submitting {:?}", jobs_queued);
pool.execute(move|| {
let correction_results: CorrectionResults = correction_job(arc_bwt, read_data, arc_params);
tx.send(correction_results).expect("channel will be there waiting for the pool");
});
jobs_queued += 1;
}
read_index += 1;
}
},
Err(e) => {
error!("Failed to open long read file: {:?}", e);
std::process::exit(exitcode::IOERR);
}
}
while results_received < jobs_queued {
let rx_value: CorrectionResults = rx.recv().unwrap();
if verbose_mode {
info!("Job #{:?}: {:.2} -> {:.2}", rx_value.read_index, rx_value.avg_before, rx_value.avg_after);
}
match fasta_writer.write_correction(rx_value) {
Ok(()) => {},
Err(e) => {
error!("Failed while writing read correction: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
results_received += 1;
if results_received % UPDATE_INTERVAL == 0 {
info!("Processed {} reads...", results_received);
}
}
info!("Finished processing {} total reads in range [{}, {})", results_received, begin_id, end_id);
}
|
main
|
identifier_name
|
fmlrc2.rs
|
extern crate clap;
extern crate env_logger;
extern crate exitcode;
extern crate log;
extern crate needletail;
use clap::{Arg, App, value_t, values_t};
use log::{info, error};
use needletail::parse_fastx_file;
use std::fs::File;
use std::sync::{Arc, mpsc};
use threadpool::ThreadPool;
use fmlrc::bv_bwt::BitVectorBWT;
use fmlrc::ordered_fasta_writer::OrderedFastaWriter;
use fmlrc::read_correction::{CorrectionParameters, CorrectionResults, LongReadFA, correction_job};
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
fn main() {
//initialize logging for our benefit later
env_logger::from_env(env_logger::Env::default().default_filter_or("info")).init();
//non-cli parameters
const JOB_SLOTS: u64 = 10000;
const UPDATE_INTERVAL: u64 = 10000;
//this is the CLI block, params that get populated appear before
let bwt_fn: String;
let long_read_fn: String;
let corrected_read_fn: String;
let mut kmer_sizes: Vec<usize> = vec![21, 59];
let mut threads: usize = 1;
let mut begin_id: u64 = 0;
let mut end_id: u64 = 0xFFFFFFFFFFFFFFFF;
let mut min_count: u64 = 5;
let mut min_frac: f64 = 0.1;
let mut branch_factor: f64 = 4.0;
let mut cache_size: usize = 8;
let verbose_mode: bool;
let matches = App::new("FMLRC2")
.version(VERSION.unwrap_or("?"))
.author("J. Matthew Holt <[email protected]>")
.about("FM-index Long Read Corrector - Rust implementation")
.arg(Arg::with_name("verbose_mode")
.short("v")
.long("verbose")
.help("enable verbose output"))
.arg(Arg::with_name("kmer_sizes")
.short("k")
.long("K")
.multiple(true)
.takes_value(true)
.help("k-mer sizes for correction, can be specified multiple times (default: \"-k 21 59\")"))
.arg(Arg::with_name("threads")
.short("t")
.long("threads")
.takes_value(true)
.help("number of correction threads (default: 1)"))
.arg(Arg::with_name("begin_id")
.short("b")
.long("begin_index")
.takes_value(true)
.help("index of read to start with (default: 0)"))
.arg(Arg::with_name("end_id")
.short("e")
.long("end_index")
.takes_value(true)
.help("index of read to end with (default: end of file)"))
.arg(Arg::with_name("min_count")
.short("m")
.long("min_count")
.takes_value(true)
.help("absolute minimum k-mer count to consisder a path (default: 5)"))
.arg(Arg::with_name("min_frac")
.short("f")
.long("min_dynamic_count")
.takes_value(true)
.help("dynamic minimum k-mer count fraction of median to consider a path (default: 0.1)"))
.arg(Arg::with_name("branch_factor")
.short("B")
.long("branch_factor")
.takes_value(true)
.help("branching factor for correction, scaled by k (default: 4.0)"))
.arg(Arg::with_name("cache_size")
.short("C")
.long("cache_size")
.takes_value(true)
.help("the length of k-mer to precompute in cache (default: 8)"))
.arg(Arg::with_name("COMP_MSBWT.NPY")
.help("The compressed BWT file with high accuracy reads")
.required(true)
.index(1))
.arg(Arg::with_name("LONG_READS.FA")
.help("The FASTX file with uncorrected reads")
.required(true)
|
.help("The FASTA file to write corrected reads to")
.required(true)
.index(3))
.get_matches();
//pull out required values
bwt_fn = matches.value_of("COMP_MSBWT.NPY").unwrap().to_string();
long_read_fn = matches.value_of("LONG_READS.FA").unwrap().to_string();
corrected_read_fn = matches.value_of("CORRECTED_READS.FA").unwrap().to_string();
//now check options
verbose_mode = matches.is_present("verbose_mode");
kmer_sizes = values_t!(matches.values_of("kmer_sizes"), usize).unwrap_or(kmer_sizes);
threads = value_t!(matches.value_of("threads"), usize).unwrap_or(threads);
begin_id = value_t!(matches.value_of("begin_id"), u64).unwrap_or(begin_id);
end_id = value_t!(matches.value_of("end_id"), u64).unwrap_or(end_id);
min_count = value_t!(matches.value_of("min_count"), u64).unwrap_or(min_count);
min_frac = value_t!(matches.value_of("min_frac"), f64).unwrap_or(min_frac);
branch_factor = value_t!(matches.value_of("branch_factor"), f64).unwrap_or(branch_factor);
cache_size = value_t!(matches.value_of("cache_size"), usize).unwrap_or(cache_size);
info!("Input parameters (required):");
info!("\tBWT: \"{}\"", bwt_fn);
match File::open(&bwt_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to open BWT file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
info!("\tInput reads: \"{}\"", long_read_fn);
match File::open(&long_read_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to open input reads file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
info!("\tOutput corrected reads: \"{}\"", corrected_read_fn);
let write_file: File = match File::create(&corrected_read_fn) {
Ok(file) => file,
Err(e) => {
error!("Failed to create output corrected reads file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
let mut fasta_writer = OrderedFastaWriter::new(&write_file);
info!("Execution Parameters:");
info!("\tverbose: {}", verbose_mode);
info!("\tthreads: {}", threads);
info!("\tcache size: {}", cache_size);
info!("Correction Parameters:");
info!("\treads to correct: [{}, {})", begin_id, end_id);
if begin_id > end_id {
error!("--begin_index set to value larger than --end_index");
std::process::exit(exitcode::DATAERR);
}
kmer_sizes.sort_unstable();
info!("\tk-mer sizes: {:?}", kmer_sizes);
info!("\tabs. mininimum count: {}", min_count);
info!("\tdyn. minimimum fraction: {}", min_frac);
if!(0.0..=1.0).contains(&min_frac) {
error!("--min_dynamic_count must be within the range [0, 1]");
std::process::exit(exitcode::DATAERR);
}
info!("\tbranching factor: {}", branch_factor);
if branch_factor <= 0.0 {
error!("--branch_factor must be greater than 0.0");
std::process::exit(exitcode::DATAERR);
}
//TODO make some of these hard-coded into params?
let my_params: CorrectionParameters = CorrectionParameters {
kmer_sizes,
min_count,
max_branch_attempt_length: 10000,
branch_limit_factor: branch_factor,
branch_buffer_factor: 1.3,
//TODO: make this 0.4 a CLI param? we did a prelim test with 0.3 and prec/recall went up but total indels/subs went up *shrug*
midpoint_ed_factor: 0.4,
tail_buffer_factor: 1.05,
frac: min_frac,
verbose: verbose_mode
};
let arc_params: Arc<CorrectionParameters> = Arc::new(my_params);
//first load the BWT into memory
let mut bwt: BitVectorBWT = BitVectorBWT::with_cache_size(cache_size);
match bwt.load_numpy_file(&bwt_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to load BWT file: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
let arc_bwt: Arc<BitVectorBWT> = Arc::new(bwt);
//we need to set up the multiprocessing components now
let pool = ThreadPool::new(threads);
let (tx, rx) = mpsc::channel();
//now needletail open the reads to correct
let mut read_index: u64 = 0;
let mut jobs_queued: u64 = 0;
let mut results_received: u64 = 0;
info!("Starting read correction processes...");
match parse_fastx_file(&long_read_fn) {
Ok(mut fastx_reader) => {
while let Some(raw_record) = fastx_reader.next() {
let record = match raw_record {
Ok(record) => { record },
Err(e) => {
error!("Invalid record while parsing long read file: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
if read_index >= begin_id && read_index < end_id {
//if we've filled our queue, then we should wait until we get some results back
if jobs_queued - results_received >= JOB_SLOTS {
let rx_value: CorrectionResults = rx.recv().unwrap();
if verbose_mode {
info!("Job #{:?}: {:.2} -> {:.2}", rx_value.read_index, rx_value.avg_before, rx_value.avg_after);
}
match fasta_writer.write_correction(rx_value) {
Ok(()) => {},
Err(e) => {
error!("Failed while writing read correction: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
results_received += 1;
if results_received % UPDATE_INTERVAL == 0 {
info!("Processed {} reads...", results_received);
}
}
//clone the transmit channel and submit the pool job
let tx = tx.clone();
let arc_bwt = arc_bwt.clone();
let arc_params = arc_params.clone();
let read_data: LongReadFA = LongReadFA {
read_index: jobs_queued,
label: String::from_utf8(record.id().to_vec()).unwrap(),
seq: String::from_utf8(record.seq().to_vec()).unwrap()
};
//println!("Submitting {:?}", jobs_queued);
pool.execute(move|| {
let correction_results: CorrectionResults = correction_job(arc_bwt, read_data, arc_params);
tx.send(correction_results).expect("channel will be there waiting for the pool");
});
jobs_queued += 1;
}
read_index += 1;
}
},
Err(e) => {
error!("Failed to open long read file: {:?}", e);
std::process::exit(exitcode::IOERR);
}
}
while results_received < jobs_queued {
let rx_value: CorrectionResults = rx.recv().unwrap();
if verbose_mode {
info!("Job #{:?}: {:.2} -> {:.2}", rx_value.read_index, rx_value.avg_before, rx_value.avg_after);
}
match fasta_writer.write_correction(rx_value) {
Ok(()) => {},
Err(e) => {
error!("Failed while writing read correction: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
results_received += 1;
if results_received % UPDATE_INTERVAL == 0 {
info!("Processed {} reads...", results_received);
}
}
info!("Finished processing {} total reads in range [{}, {})", results_received, begin_id, end_id);
}
|
.index(2))
.arg(Arg::with_name("CORRECTED_READS.FA")
|
random_line_split
|
fmlrc2.rs
|
extern crate clap;
extern crate env_logger;
extern crate exitcode;
extern crate log;
extern crate needletail;
use clap::{Arg, App, value_t, values_t};
use log::{info, error};
use needletail::parse_fastx_file;
use std::fs::File;
use std::sync::{Arc, mpsc};
use threadpool::ThreadPool;
use fmlrc::bv_bwt::BitVectorBWT;
use fmlrc::ordered_fasta_writer::OrderedFastaWriter;
use fmlrc::read_correction::{CorrectionParameters, CorrectionResults, LongReadFA, correction_job};
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
fn main() {
//initialize logging for our benefit later
env_logger::from_env(env_logger::Env::default().default_filter_or("info")).init();
//non-cli parameters
const JOB_SLOTS: u64 = 10000;
const UPDATE_INTERVAL: u64 = 10000;
//this is the CLI block, params that get populated appear before
let bwt_fn: String;
let long_read_fn: String;
let corrected_read_fn: String;
let mut kmer_sizes: Vec<usize> = vec![21, 59];
let mut threads: usize = 1;
let mut begin_id: u64 = 0;
let mut end_id: u64 = 0xFFFFFFFFFFFFFFFF;
let mut min_count: u64 = 5;
let mut min_frac: f64 = 0.1;
let mut branch_factor: f64 = 4.0;
let mut cache_size: usize = 8;
let verbose_mode: bool;
let matches = App::new("FMLRC2")
.version(VERSION.unwrap_or("?"))
.author("J. Matthew Holt <[email protected]>")
.about("FM-index Long Read Corrector - Rust implementation")
.arg(Arg::with_name("verbose_mode")
.short("v")
.long("verbose")
.help("enable verbose output"))
.arg(Arg::with_name("kmer_sizes")
.short("k")
.long("K")
.multiple(true)
.takes_value(true)
.help("k-mer sizes for correction, can be specified multiple times (default: \"-k 21 59\")"))
.arg(Arg::with_name("threads")
.short("t")
.long("threads")
.takes_value(true)
.help("number of correction threads (default: 1)"))
.arg(Arg::with_name("begin_id")
.short("b")
.long("begin_index")
.takes_value(true)
.help("index of read to start with (default: 0)"))
.arg(Arg::with_name("end_id")
.short("e")
.long("end_index")
.takes_value(true)
.help("index of read to end with (default: end of file)"))
.arg(Arg::with_name("min_count")
.short("m")
.long("min_count")
.takes_value(true)
.help("absolute minimum k-mer count to consisder a path (default: 5)"))
.arg(Arg::with_name("min_frac")
.short("f")
.long("min_dynamic_count")
.takes_value(true)
.help("dynamic minimum k-mer count fraction of median to consider a path (default: 0.1)"))
.arg(Arg::with_name("branch_factor")
.short("B")
.long("branch_factor")
.takes_value(true)
.help("branching factor for correction, scaled by k (default: 4.0)"))
.arg(Arg::with_name("cache_size")
.short("C")
.long("cache_size")
.takes_value(true)
.help("the length of k-mer to precompute in cache (default: 8)"))
.arg(Arg::with_name("COMP_MSBWT.NPY")
.help("The compressed BWT file with high accuracy reads")
.required(true)
.index(1))
.arg(Arg::with_name("LONG_READS.FA")
.help("The FASTX file with uncorrected reads")
.required(true)
.index(2))
.arg(Arg::with_name("CORRECTED_READS.FA")
.help("The FASTA file to write corrected reads to")
.required(true)
.index(3))
.get_matches();
//pull out required values
bwt_fn = matches.value_of("COMP_MSBWT.NPY").unwrap().to_string();
long_read_fn = matches.value_of("LONG_READS.FA").unwrap().to_string();
corrected_read_fn = matches.value_of("CORRECTED_READS.FA").unwrap().to_string();
//now check options
verbose_mode = matches.is_present("verbose_mode");
kmer_sizes = values_t!(matches.values_of("kmer_sizes"), usize).unwrap_or(kmer_sizes);
threads = value_t!(matches.value_of("threads"), usize).unwrap_or(threads);
begin_id = value_t!(matches.value_of("begin_id"), u64).unwrap_or(begin_id);
end_id = value_t!(matches.value_of("end_id"), u64).unwrap_or(end_id);
min_count = value_t!(matches.value_of("min_count"), u64).unwrap_or(min_count);
min_frac = value_t!(matches.value_of("min_frac"), f64).unwrap_or(min_frac);
branch_factor = value_t!(matches.value_of("branch_factor"), f64).unwrap_or(branch_factor);
cache_size = value_t!(matches.value_of("cache_size"), usize).unwrap_or(cache_size);
info!("Input parameters (required):");
info!("\tBWT: \"{}\"", bwt_fn);
match File::open(&bwt_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to open BWT file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
info!("\tInput reads: \"{}\"", long_read_fn);
match File::open(&long_read_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to open input reads file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
info!("\tOutput corrected reads: \"{}\"", corrected_read_fn);
let write_file: File = match File::create(&corrected_read_fn) {
Ok(file) => file,
Err(e) => {
error!("Failed to create output corrected reads file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
let mut fasta_writer = OrderedFastaWriter::new(&write_file);
info!("Execution Parameters:");
info!("\tverbose: {}", verbose_mode);
info!("\tthreads: {}", threads);
info!("\tcache size: {}", cache_size);
info!("Correction Parameters:");
info!("\treads to correct: [{}, {})", begin_id, end_id);
if begin_id > end_id {
error!("--begin_index set to value larger than --end_index");
std::process::exit(exitcode::DATAERR);
}
kmer_sizes.sort_unstable();
info!("\tk-mer sizes: {:?}", kmer_sizes);
info!("\tabs. mininimum count: {}", min_count);
info!("\tdyn. minimimum fraction: {}", min_frac);
if!(0.0..=1.0).contains(&min_frac) {
error!("--min_dynamic_count must be within the range [0, 1]");
std::process::exit(exitcode::DATAERR);
}
info!("\tbranching factor: {}", branch_factor);
if branch_factor <= 0.0 {
error!("--branch_factor must be greater than 0.0");
std::process::exit(exitcode::DATAERR);
}
//TODO make some of these hard-coded into params?
let my_params: CorrectionParameters = CorrectionParameters {
kmer_sizes,
min_count,
max_branch_attempt_length: 10000,
branch_limit_factor: branch_factor,
branch_buffer_factor: 1.3,
//TODO: make this 0.4 a CLI param? we did a prelim test with 0.3 and prec/recall went up but total indels/subs went up *shrug*
midpoint_ed_factor: 0.4,
tail_buffer_factor: 1.05,
frac: min_frac,
verbose: verbose_mode
};
let arc_params: Arc<CorrectionParameters> = Arc::new(my_params);
//first load the BWT into memory
let mut bwt: BitVectorBWT = BitVectorBWT::with_cache_size(cache_size);
match bwt.load_numpy_file(&bwt_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to load BWT file: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
let arc_bwt: Arc<BitVectorBWT> = Arc::new(bwt);
//we need to set up the multiprocessing components now
let pool = ThreadPool::new(threads);
let (tx, rx) = mpsc::channel();
//now needletail open the reads to correct
let mut read_index: u64 = 0;
let mut jobs_queued: u64 = 0;
let mut results_received: u64 = 0;
info!("Starting read correction processes...");
match parse_fastx_file(&long_read_fn) {
Ok(mut fastx_reader) => {
while let Some(raw_record) = fastx_reader.next() {
let record = match raw_record {
Ok(record) => { record },
Err(e) => {
error!("Invalid record while parsing long read file: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
if read_index >= begin_id && read_index < end_id {
//if we've filled our queue, then we should wait until we get some results back
if jobs_queued - results_received >= JOB_SLOTS
|
//clone the transmit channel and submit the pool job
let tx = tx.clone();
let arc_bwt = arc_bwt.clone();
let arc_params = arc_params.clone();
let read_data: LongReadFA = LongReadFA {
read_index: jobs_queued,
label: String::from_utf8(record.id().to_vec()).unwrap(),
seq: String::from_utf8(record.seq().to_vec()).unwrap()
};
//println!("Submitting {:?}", jobs_queued);
pool.execute(move|| {
let correction_results: CorrectionResults = correction_job(arc_bwt, read_data, arc_params);
tx.send(correction_results).expect("channel will be there waiting for the pool");
});
jobs_queued += 1;
}
read_index += 1;
}
},
Err(e) => {
error!("Failed to open long read file: {:?}", e);
std::process::exit(exitcode::IOERR);
}
}
while results_received < jobs_queued {
let rx_value: CorrectionResults = rx.recv().unwrap();
if verbose_mode {
info!("Job #{:?}: {:.2} -> {:.2}", rx_value.read_index, rx_value.avg_before, rx_value.avg_after);
}
match fasta_writer.write_correction(rx_value) {
Ok(()) => {},
Err(e) => {
error!("Failed while writing read correction: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
results_received += 1;
if results_received % UPDATE_INTERVAL == 0 {
info!("Processed {} reads...", results_received);
}
}
info!("Finished processing {} total reads in range [{}, {})", results_received, begin_id, end_id);
}
|
{
let rx_value: CorrectionResults = rx.recv().unwrap();
if verbose_mode {
info!("Job #{:?}: {:.2} -> {:.2}", rx_value.read_index, rx_value.avg_before, rx_value.avg_after);
}
match fasta_writer.write_correction(rx_value) {
Ok(()) => {},
Err(e) => {
error!("Failed while writing read correction: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
results_received += 1;
if results_received % UPDATE_INTERVAL == 0 {
info!("Processed {} reads...", results_received);
}
}
|
conditional_block
|
fmlrc2.rs
|
extern crate clap;
extern crate env_logger;
extern crate exitcode;
extern crate log;
extern crate needletail;
use clap::{Arg, App, value_t, values_t};
use log::{info, error};
use needletail::parse_fastx_file;
use std::fs::File;
use std::sync::{Arc, mpsc};
use threadpool::ThreadPool;
use fmlrc::bv_bwt::BitVectorBWT;
use fmlrc::ordered_fasta_writer::OrderedFastaWriter;
use fmlrc::read_correction::{CorrectionParameters, CorrectionResults, LongReadFA, correction_job};
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
fn main()
|
let verbose_mode: bool;
let matches = App::new("FMLRC2")
.version(VERSION.unwrap_or("?"))
.author("J. Matthew Holt <[email protected]>")
.about("FM-index Long Read Corrector - Rust implementation")
.arg(Arg::with_name("verbose_mode")
.short("v")
.long("verbose")
.help("enable verbose output"))
.arg(Arg::with_name("kmer_sizes")
.short("k")
.long("K")
.multiple(true)
.takes_value(true)
.help("k-mer sizes for correction, can be specified multiple times (default: \"-k 21 59\")"))
.arg(Arg::with_name("threads")
.short("t")
.long("threads")
.takes_value(true)
.help("number of correction threads (default: 1)"))
.arg(Arg::with_name("begin_id")
.short("b")
.long("begin_index")
.takes_value(true)
.help("index of read to start with (default: 0)"))
.arg(Arg::with_name("end_id")
.short("e")
.long("end_index")
.takes_value(true)
.help("index of read to end with (default: end of file)"))
.arg(Arg::with_name("min_count")
.short("m")
.long("min_count")
.takes_value(true)
.help("absolute minimum k-mer count to consisder a path (default: 5)"))
.arg(Arg::with_name("min_frac")
.short("f")
.long("min_dynamic_count")
.takes_value(true)
.help("dynamic minimum k-mer count fraction of median to consider a path (default: 0.1)"))
.arg(Arg::with_name("branch_factor")
.short("B")
.long("branch_factor")
.takes_value(true)
.help("branching factor for correction, scaled by k (default: 4.0)"))
.arg(Arg::with_name("cache_size")
.short("C")
.long("cache_size")
.takes_value(true)
.help("the length of k-mer to precompute in cache (default: 8)"))
.arg(Arg::with_name("COMP_MSBWT.NPY")
.help("The compressed BWT file with high accuracy reads")
.required(true)
.index(1))
.arg(Arg::with_name("LONG_READS.FA")
.help("The FASTX file with uncorrected reads")
.required(true)
.index(2))
.arg(Arg::with_name("CORRECTED_READS.FA")
.help("The FASTA file to write corrected reads to")
.required(true)
.index(3))
.get_matches();
//pull out required values
bwt_fn = matches.value_of("COMP_MSBWT.NPY").unwrap().to_string();
long_read_fn = matches.value_of("LONG_READS.FA").unwrap().to_string();
corrected_read_fn = matches.value_of("CORRECTED_READS.FA").unwrap().to_string();
//now check options
verbose_mode = matches.is_present("verbose_mode");
kmer_sizes = values_t!(matches.values_of("kmer_sizes"), usize).unwrap_or(kmer_sizes);
threads = value_t!(matches.value_of("threads"), usize).unwrap_or(threads);
begin_id = value_t!(matches.value_of("begin_id"), u64).unwrap_or(begin_id);
end_id = value_t!(matches.value_of("end_id"), u64).unwrap_or(end_id);
min_count = value_t!(matches.value_of("min_count"), u64).unwrap_or(min_count);
min_frac = value_t!(matches.value_of("min_frac"), f64).unwrap_or(min_frac);
branch_factor = value_t!(matches.value_of("branch_factor"), f64).unwrap_or(branch_factor);
cache_size = value_t!(matches.value_of("cache_size"), usize).unwrap_or(cache_size);
info!("Input parameters (required):");
info!("\tBWT: \"{}\"", bwt_fn);
match File::open(&bwt_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to open BWT file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
info!("\tInput reads: \"{}\"", long_read_fn);
match File::open(&long_read_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to open input reads file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
info!("\tOutput corrected reads: \"{}\"", corrected_read_fn);
let write_file: File = match File::create(&corrected_read_fn) {
Ok(file) => file,
Err(e) => {
error!("Failed to create output corrected reads file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
let mut fasta_writer = OrderedFastaWriter::new(&write_file);
info!("Execution Parameters:");
info!("\tverbose: {}", verbose_mode);
info!("\tthreads: {}", threads);
info!("\tcache size: {}", cache_size);
info!("Correction Parameters:");
info!("\treads to correct: [{}, {})", begin_id, end_id);
if begin_id > end_id {
error!("--begin_index set to value larger than --end_index");
std::process::exit(exitcode::DATAERR);
}
kmer_sizes.sort_unstable();
info!("\tk-mer sizes: {:?}", kmer_sizes);
info!("\tabs. mininimum count: {}", min_count);
info!("\tdyn. minimimum fraction: {}", min_frac);
if!(0.0..=1.0).contains(&min_frac) {
error!("--min_dynamic_count must be within the range [0, 1]");
std::process::exit(exitcode::DATAERR);
}
info!("\tbranching factor: {}", branch_factor);
if branch_factor <= 0.0 {
error!("--branch_factor must be greater than 0.0");
std::process::exit(exitcode::DATAERR);
}
//TODO make some of these hard-coded into params?
let my_params: CorrectionParameters = CorrectionParameters {
kmer_sizes,
min_count,
max_branch_attempt_length: 10000,
branch_limit_factor: branch_factor,
branch_buffer_factor: 1.3,
//TODO: make this 0.4 a CLI param? we did a prelim test with 0.3 and prec/recall went up but total indels/subs went up *shrug*
midpoint_ed_factor: 0.4,
tail_buffer_factor: 1.05,
frac: min_frac,
verbose: verbose_mode
};
let arc_params: Arc<CorrectionParameters> = Arc::new(my_params);
//first load the BWT into memory
let mut bwt: BitVectorBWT = BitVectorBWT::with_cache_size(cache_size);
match bwt.load_numpy_file(&bwt_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to load BWT file: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
let arc_bwt: Arc<BitVectorBWT> = Arc::new(bwt);
//we need to set up the multiprocessing components now
let pool = ThreadPool::new(threads);
let (tx, rx) = mpsc::channel();
//now needletail open the reads to correct
let mut read_index: u64 = 0;
let mut jobs_queued: u64 = 0;
let mut results_received: u64 = 0;
info!("Starting read correction processes...");
match parse_fastx_file(&long_read_fn) {
Ok(mut fastx_reader) => {
while let Some(raw_record) = fastx_reader.next() {
let record = match raw_record {
Ok(record) => { record },
Err(e) => {
error!("Invalid record while parsing long read file: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
if read_index >= begin_id && read_index < end_id {
//if we've filled our queue, then we should wait until we get some results back
if jobs_queued - results_received >= JOB_SLOTS {
let rx_value: CorrectionResults = rx.recv().unwrap();
if verbose_mode {
info!("Job #{:?}: {:.2} -> {:.2}", rx_value.read_index, rx_value.avg_before, rx_value.avg_after);
}
match fasta_writer.write_correction(rx_value) {
Ok(()) => {},
Err(e) => {
error!("Failed while writing read correction: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
results_received += 1;
if results_received % UPDATE_INTERVAL == 0 {
info!("Processed {} reads...", results_received);
}
}
//clone the transmit channel and submit the pool job
let tx = tx.clone();
let arc_bwt = arc_bwt.clone();
let arc_params = arc_params.clone();
let read_data: LongReadFA = LongReadFA {
read_index: jobs_queued,
label: String::from_utf8(record.id().to_vec()).unwrap(),
seq: String::from_utf8(record.seq().to_vec()).unwrap()
};
//println!("Submitting {:?}", jobs_queued);
pool.execute(move|| {
let correction_results: CorrectionResults = correction_job(arc_bwt, read_data, arc_params);
tx.send(correction_results).expect("channel will be there waiting for the pool");
});
jobs_queued += 1;
}
read_index += 1;
}
},
Err(e) => {
error!("Failed to open long read file: {:?}", e);
std::process::exit(exitcode::IOERR);
}
}
while results_received < jobs_queued {
let rx_value: CorrectionResults = rx.recv().unwrap();
if verbose_mode {
info!("Job #{:?}: {:.2} -> {:.2}", rx_value.read_index, rx_value.avg_before, rx_value.avg_after);
}
match fasta_writer.write_correction(rx_value) {
Ok(()) => {},
Err(e) => {
error!("Failed while writing read correction: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
results_received += 1;
if results_received % UPDATE_INTERVAL == 0 {
info!("Processed {} reads...", results_received);
}
}
info!("Finished processing {} total reads in range [{}, {})", results_received, begin_id, end_id);
}
|
{
//initialize logging for our benefit later
env_logger::from_env(env_logger::Env::default().default_filter_or("info")).init();
//non-cli parameters
const JOB_SLOTS: u64 = 10000;
const UPDATE_INTERVAL: u64 = 10000;
//this is the CLI block, params that get populated appear before
let bwt_fn: String;
let long_read_fn: String;
let corrected_read_fn: String;
let mut kmer_sizes: Vec<usize> = vec![21, 59];
let mut threads: usize = 1;
let mut begin_id: u64 = 0;
let mut end_id: u64 = 0xFFFFFFFFFFFFFFFF;
let mut min_count: u64 = 5;
let mut min_frac: f64 = 0.1;
let mut branch_factor: f64 = 4.0;
let mut cache_size: usize = 8;
|
identifier_body
|
browser.rs
|
command_response, CommandMessage};
use crate::conn::Connection;
use crate::error::{CdpError, Result};
use crate::handler::browser::BrowserContext;
use crate::handler::viewport::Viewport;
use crate::handler::{Handler, HandlerConfig, HandlerMessage, REQUEST_TIMEOUT};
use crate::listeners::{EventListenerRequest, EventStream};
use crate::page::Page;
use chromiumoxide_cdp::cdp::browser_protocol::browser::{GetVersionParams, GetVersionReturns};
/// A [`Browser`] is created when chromiumoxide connects to a Chromium instance.
#[derive(Debug)]
pub struct Browser {
/// The `Sender` to send messages to the connection handler that drives the
/// websocket
sender: Sender<HandlerMessage>,
/// How the spawned chromium instance was configured, if any
config: Option<BrowserConfig>,
/// The spawned chromium instance
child: Option<Child>,
/// The debug web socket url of the chromium instance
debug_ws_url: String,
/// The context of the browser
browser_context: BrowserContext,
}
impl Browser {
/// Connect to an already running chromium instance via websocket
pub async fn connect(debug_ws_url: impl Into<String>) -> Result<(Self, Handler)> {
let debug_ws_url = debug_ws_url.into();
let conn = Connection::<CdpEventMessage>::connect(&debug_ws_url).await?;
let (tx, rx) = channel(1);
let fut = Handler::new(conn, rx, HandlerConfig::default());
let browser_context = fut.default_browser_context().clone();
let browser = Self {
sender: tx,
config: None,
child: None,
debug_ws_url,
browser_context,
};
Ok((browser, fut))
}
/// Launches a new instance of `chromium` in the background and attaches to
/// its debug web socket.
///
/// This fails when no chromium executable could be detected.
///
/// This fails if no web socket url could be detected from the child
/// processes stderr for more than 20 seconds.
pub async fn launch(config: BrowserConfig) -> Result<(Self, Handler)> {
// launch a new chromium instance
let mut child = config.launch()?;
// extract the ws:
let get_ws_url = ws_url_from_output(&mut child);
let dur = Duration::from_secs(20);
cfg_if::cfg_if! {
if #[cfg(feature = "async-std-runtime")] {
let debug_ws_url = async_std::future::timeout(dur, get_ws_url)
.await
.map_err(|_| CdpError::Timeout)?;
} else if #[cfg(feature = "tokio-runtime")] {
let debug_ws_url = tokio::time::timeout(dur, get_ws_url).await
.map_err(|_| CdpError::Timeout)?;
}
}
let conn = Connection::<CdpEventMessage>::connect(&debug_ws_url).await?;
let (tx, rx) = channel(1);
let handler_config = HandlerConfig {
ignore_https_errors: config.ignore_https_errors,
viewport: Some(config.viewport.clone()),
context_ids: Vec::new(),
request_timeout: config.request_timeout,
};
let fut = Handler::new(conn, rx, handler_config);
let browser_context = fut.default_browser_context().clone();
let browser = Self {
sender: tx,
config: Some(config),
child: Some(child),
debug_ws_url,
browser_context,
};
Ok((browser, fut))
}
/// If not launched as incognito this creates a new incognito browser
/// context. After that this browser exists within the incognito session.
/// New pages created while being in incognito mode will also run in the
/// incognito context. Incognito contexts won't share cookies/cache with
/// other browser contexts.
pub async fn start_incognito_context(&mut self) -> Result<&mut Self> {
if!self.is_incognito_configured() {
let resp = self
.execute(CreateBrowserContextParams::default())
.await?
.result;
self.browser_context = BrowserContext::from(resp.browser_context_id);
self.sender
.clone()
.send(HandlerMessage::InsertContext(self.browser_context.clone()))
.await?;
}
Ok(self)
}
/// If a incognito session was created with
/// `Browser::start_incognito_context` this disposes this context.
///
/// # Note This will also dispose all pages that were running within the
/// incognito context.
pub async fn quit_incognito_context(&mut self) -> Result<&mut Self> {
    // Taking the id also resets the browser back to the default context.
    let ctx_id = self.browser_context.take();
    if let Some(id) = ctx_id {
        self.execute(DisposeBrowserContextParams::new(id.clone()))
            .await?;
        let msg = HandlerMessage::DisposeContext(BrowserContext::from(id));
        self.sender.clone().send(msg).await?;
    }
    Ok(self)
}
/// Whether incognito mode was configured from the start
fn is_incognito_configured(&self) -> bool {
    // A browser connected via `connect` has no config and is treated as
    // not incognito-configured.
    match self.config.as_ref() {
        Some(cfg) => cfg.incognito,
        None => false,
    }
}
/// Returns the address of the websocket this browser is attached to
pub fn websocket_address(&self) -> &String {
    // NOTE(review): returns `&String` rather than the more idiomatic `&str`;
    // kept as-is since changing it would break callers.
    &self.debug_ws_url
}
/// Whether the BrowserContext is incognito.
pub fn is_incognito(&self) -> bool {
    // Incognito either via launch configuration or via a context created
    // later with `start_incognito_context`.
    if self.is_incognito_configured() {
        true
    } else {
        self.browser_context.is_incognito()
    }
}
/// The config of the spawned chromium instance if any.
pub fn config(&self) -> Option<&BrowserConfig> {
    // `None` when this instance was attached via `Browser::connect`.
    self.config.as_ref()
}
/// Create a new browser page
pub async fn new_page(&self, params: impl Into<CreateTargetParams>) -> Result<Page> {
    let mut params = params.into();
    // When no explicit context was requested, create the page inside this
    // browser's current context (if it has an id, e.g. incognito).
    if params.browser_context_id.is_none() {
        if let Some(id) = self.browser_context.id() {
            params.browser_context_id = Some(id.clone());
        }
    }
    let (tx, rx) = oneshot_channel();
    self.sender
        .clone()
        .send(HandlerMessage::CreatePage(params, tx))
        .await?;
    rx.await?
}
/// Version information about the browser
pub async fn version(&self) -> Result<GetVersionReturns> {
    // Executes the CDP `Browser.getVersion` command and unwraps its result.
    Ok(self.execute(GetVersionParams::default()).await?.result)
}
/// Returns the user agent of the browser
pub async fn user_agent(&self) -> Result<String> {
    // Convenience accessor: the user agent is part of the version info.
    Ok(self.version().await?.user_agent)
}
/// Call a browser method.
pub async fn execute<T: Command>(&self, cmd: T) -> Result<CommandResponse<T::Response>> {
    // Capture the method name before the command is moved into the message.
    let method = cmd.identifier();
    let (tx, rx) = oneshot_channel();
    let msg = CommandMessage::new(cmd, tx)?;
    let mut sender = self.sender.clone();
    sender.send(HandlerMessage::Command(msg)).await?;
    // Outer `?`: channel error; inner `?`: error reported by the browser.
    let response = rx.await??;
    to_command_response::<T>(response, method)
}
/// Return all of the pages of the browser
pub async fn pages(&self) -> Result<Vec<Page>> {
    let (tx, rx) = oneshot_channel();
    let mut sender = self.sender.clone();
    sender.send(HandlerMessage::GetPages(tx)).await?;
    let pages = rx.await?;
    Ok(pages)
}
/// Return page of given target_id
pub async fn get_page(&self, target_id: TargetId) -> Result<Page> {
    let (tx, rx) = oneshot_channel();
    let mut sender = self.sender.clone();
    sender.send(HandlerMessage::GetPage(target_id, tx)).await?;
    // The handler answers `None` when no page matches the target id.
    match rx.await? {
        Some(page) => Ok(page),
        None => Err(CdpError::NotFound),
    }
}
//Set listener for browser event
pub async fn event_listener<T: IntoEventKind>(&self) -> Result<EventStream<T>> {
    let (tx, rx) = unbounded();
    let request = EventListenerRequest::new::<T>(tx);
    let mut sender = self.sender.clone();
    sender.send(HandlerMessage::AddEventListener(request)).await?;
    Ok(EventStream::new(rx))
}
}
impl Drop for Browser {
    /// Best-effort cleanup: kill the spawned chromium process if we own one.
    fn drop(&mut self) {
        if let Some(child) = self.child.as_mut() {
            // Do not panic here: a panic inside `Drop` while already
            // unwinding aborts the whole process. Killing an already-dead
            // child is a benign failure.
            child.kill().ok();
        }
    }
}
/// Extracts the DevTools websocket url from the spawned chromium's stderr.
///
/// Chromium prints a line of the form
/// `DevTools listening on ws://.../devtools/browser/...` on startup; this
/// blocks (on a worker thread) until such a line is observed.
async fn ws_url_from_output(child_process: &mut Child) -> String {
    let stderr = child_process.stderr.take().expect("no stderror");
    fn read_debug_url(stderr: std::process::ChildStderr) -> String {
        let mut buf = BufReader::new(stderr);
        let mut line = String::new();
        loop {
            // BUGFIX: `read_line` *appends* to its buffer, and the original
            // code only reset `line` on read errors — successive lines piled
            // up, growing without bound and letting the match run across
            // line boundaries. Clear the buffer before every read.
            line.clear();
            if buf.read_line(&mut line).is_ok() {
                // `rsplit(..).next()` yields the text after the last
                // "listening on " (or the whole line if absent).
                if let Some(ws) = line.rsplit("listening on ").next() {
                    if ws.starts_with("ws") && ws.contains("devtools/browser") {
                        return ws.trim().to_string();
                    }
                }
            }
        }
    }
    // Run the blocking read on a dedicated thread of the active runtime;
    // the 20s timeout in `Browser::launch` bounds this wait.
    cfg_if::cfg_if! {
        if #[cfg(feature = "async-std-runtime")] {
            async_std::task::spawn_blocking(|| read_debug_url(stderr)).await
        } else if #[cfg(feature = "tokio-runtime")] {
            tokio::task::spawn_blocking(move || read_debug_url(stderr)).await.expect("Failed to read debug url from process output")
        }
    }
}
/// Configuration used by [`Browser::launch`] to spawn a chromium process.
///
/// Build instances via [`BrowserConfig::builder`].
#[derive(Debug, Clone)]
pub struct BrowserConfig {
    /// Determines whether to run headless version of the browser. Defaults to
    /// true.
    headless: bool,
    /// Determines whether to run the browser with a sandbox.
    sandbox: bool,
    /// Launch the browser with a specific window width and height.
    window_size: Option<(u32, u32)>,
    /// Launch the browser with a specific debugging port.
    port: u16,
    /// Path for Chrome or Chromium.
    ///
    /// If unspecified, the create will try to automatically detect a suitable
    /// binary.
    executable: std::path::PathBuf,
    /// A list of Chrome extensions to load.
    ///
    /// An extension should be a path to a folder containing the extension code.
    /// CRX files cannot be used directly and must be first extracted.
    ///
    /// Note that Chrome does not support loading extensions in headless-mode.
    /// See https://bugs.chromium.org/p/chromium/issues/detail?id=706008#c5
    extensions: Vec<String>,
    /// Environment variables to set for the Chromium process.
    /// Passes value through to std::process::Command::envs.
    pub process_envs: Option<HashMap<String, String>>,
    /// Data dir for user data
    pub user_data_dir: Option<PathBuf>,
    /// Whether to launch the `Browser` in incognito mode
    incognito: bool,
    /// Ignore https errors, default is true
    ignore_https_errors: bool,
    /// Viewport applied to every page via the handler config.
    viewport: Viewport,
    /// The duration after a request with no response should time out
    request_timeout: Duration,
    /// Additional command line arguments to pass to the browser instance.
    args: Vec<String>,
}
/// Builder for [`BrowserConfig`]; obtain one via [`BrowserConfig::builder`].
///
/// Fields mirror `BrowserConfig`, with `executable` optional so that
/// `build()` can fall back to auto-detection.
#[derive(Debug, Clone)]
pub struct BrowserConfigBuilder {
    headless: bool,
    sandbox: bool,
    window_size: Option<(u32, u32)>,
    port: u16,
    executable: Option<PathBuf>,
    extensions: Vec<String>,
    process_envs: Option<HashMap<String, String>>,
    user_data_dir: Option<PathBuf>,
    incognito: bool,
    ignore_https_errors: bool,
    viewport: Viewport,
    request_timeout: Duration,
    args: Vec<String>,
}
impl BrowserConfig {
    /// Start building a `BrowserConfig` with default settings.
    pub fn builder() -> BrowserConfigBuilder {
        BrowserConfigBuilder::default()
    }

    /// Build a config that uses the chrome/chromium binary at `path`.
    ///
    /// The `unwrap` cannot fail here: `build()` only errors when no
    /// executable was set and auto-detection fails, and an executable is
    /// always set on this path.
    pub fn with_executable(path: impl AsRef<Path>) -> Self {
        Self::builder().chrome_executable(path).build().unwrap()
    }
}
impl Default for BrowserConfigBuilder {
    fn default() -> Self {
        Self {
            headless: true,
            sandbox: true,
            window_size: None,
            // 0 lets the OS pick a free debugging port.
            port: 0,
            // No executable yet: `build()` will auto-detect one.
            executable: None,
            extensions: Vec::new(),
            process_envs: None,
            user_data_dir: None,
            incognito: false,
            ignore_https_errors: true,
            viewport: Default::default(),
            request_timeout: Duration::from_millis(REQUEST_TIMEOUT),
            args: Vec::new(),
        }
    }
}
impl BrowserConfigBuilder {
    /// Launch the browser with a fixed window width and height.
    pub fn window_size(mut self, width: u32, height: u32) -> Self {
        self.window_size = Some((width, height));
        self
    }

    /// Disable the chromium sandbox (`--no-sandbox`).
    pub fn no_sandbox(mut self) -> Self {
        self.sandbox = false;
        self
    }

    /// Run the browser with a visible window instead of headless.
    pub fn with_head(mut self) -> Self {
        self.headless = false;
        self
    }

    /// Launch the browser in incognito mode.
    pub fn incognito(mut self) -> Self {
        self.incognito = true;
        self
    }

    /// Do not ignore https errors (they are ignored by default).
    pub fn respect_https_errors(mut self) -> Self {
        self.ignore_https_errors = false;
        self
    }

    /// Set the duration after which a request without response times out.
    pub fn request_timeout(mut self, timeout: Duration) -> Self {
        self.request_timeout = timeout;
        self
    }

    /// Configure the viewport applied to pages.
    pub fn viewport(mut self, viewport: Viewport) -> Self {
        self.viewport = viewport;
        self
    }

    /// Set the directory chromium uses for user data.
    pub fn user_data_dir(mut self, data_dir: impl AsRef<Path>) -> Self {
        self.user_data_dir = Some(data_dir.as_ref().to_path_buf());
        self
    }

    /// Use the chrome/chromium binary at `path` instead of auto-detecting one.
    pub fn chrome_executable(mut self, path: impl AsRef<Path>) -> Self {
        self.executable = Some(path.as_ref().to_path_buf());
        self
    }

    /// Add a single extension (path to an unpacked extension folder).
    pub fn extension(mut self, extension: impl Into<String>) -> Self {
        self.extensions.push(extension.into());
        self
    }

    /// Add multiple extensions.
    pub fn extensions<I, S>(mut self, extensions: I) -> Self
    where
        I: IntoIterator<Item = S>,
        S: Into<String>,
    {
        for ext in extensions {
            self.extensions.push(ext.into());
        }
        self
    }

    /// Set a single environment variable for the chromium process.
    pub fn env(mut self, key: impl Into<String>, val: impl Into<String>) -> Self {
        self.process_envs
            .get_or_insert(HashMap::new())
            .insert(key.into(), val.into());
        self
    }

    /// Set multiple environment variables for the chromium process.
    pub fn envs<I, K, V>(mut self, envs: I) -> Self
    where
        I: IntoIterator<Item = (K, V)>,
        K: Into<String>,
        V: Into<String>,
    {
        self.process_envs
            .get_or_insert(HashMap::new())
            .extend(envs.into_iter().map(|(k, v)| (k.into(), v.into())));
        self
    }

    /// Add a single extra command line argument.
    pub fn arg(mut self, arg: impl Into<String>) -> Self {
        self.args.push(arg.into());
        self
    }

    /// Add multiple extra command line arguments.
    pub fn args<I, S>(mut self, args: I) -> Self
    where
        I: IntoIterator<Item = S>,
        S: Into<String>,
    {
        // BUGFIX: the loop body was missing, so every argument passed to
        // this method was silently dropped.
        for arg in args {
            self.args.push(arg.into());
        }
        self
    }

    /// Consume the builder and produce a [`BrowserConfig`].
    ///
    /// # Errors
    /// Returns an error when no executable was configured and none could be
    /// auto-detected via [`default_executable`].
    pub fn build(self) -> std::result::Result<BrowserConfig, String> {
        let executable = if let Some(e) = self.executable {
            e
        } else {
            default_executable()?
        };
        Ok(BrowserConfig {
            headless: self.headless,
            sandbox: self.sandbox,
            window_size: self.window_size,
            port: self.port,
            executable,
            extensions: self.extensions,
            process_envs: self.process_envs,
            user_data_dir: self.user_data_dir,
            incognito: self.incognito,
            ignore_https_errors: self.ignore_https_errors,
            viewport: self.viewport,
            request_timeout: self.request_timeout,
            args: self.args,
        })
    }
}
impl BrowserConfig {
    /// Spawns a chromium child process according to this config.
    ///
    /// stderr is piped so the DevTools websocket url can be scraped from it
    /// by `ws_url_from_output`. Returns the I/O error from `spawn` on
    /// failure (e.g. missing executable).
    pub fn launch(&self) -> io::Result<Child> {
        let dbg_port = format!("--remote-debugging-port={}", self.port);
        // Baseline flag set disabling background/UI features that would
        // interfere with automation.
        let args = [
            dbg_port.as_str(),
            "--disable-background-networking",
            "--enable-features=NetworkService,NetworkServiceInProcess",
            "--disable-background-timer-throttling",
            "--disable-backgrounding-occluded-windows",
            "--disable-breakpad",
            "--disable-client-side-phishing-detection",
            "--disable-component-extensions-with-background-pages",
            "--disable-default-apps",
            "--disable-dev-shm-usage",
            "--disable-extensions",
            "--disable-features=TranslateUI",
            "--disable-hang-monitor",
            "--disable-ipc-flooding-protection",
            "--disable-popup-blocking",
            "--disable-prompt-on-repost",
            "--disable-renderer-backgrounding",
            "--disable-sync",
            "--force-color-profile=srgb",
            "--metrics-recording-only",
            "--no-first-run",
            "--enable-automation",
            "--password-store=basic",
            "--use-mock-keychain",
            "--enable-blink-features=IdleDetection",
        ];
        let mut cmd = process::Command::new(&self.executable);
        // Order: baseline flags, DEFAULT_ARGS, user args, then extensions.
        cmd.args(&args).args(&DEFAULT_ARGS).args(&self.args).args(
            self.extensions
                .iter()
                .map(|e| format!("--load-extension={}", e)),
        );
        if let Some(ref user_data) = self.user_data_dir {
            cmd.arg(format!("--user-data-dir={}", user_data.display()));
        }
        if let Some((width, height)) = self.window_size {
            cmd.arg(format!("--window-size={},{}", width, height));
        }
        if !self.sandbox {
            cmd.args(&["--no-sandbox", "--disable-setuid-sandbox"]);
        }
        if self.headless {
            cmd.args(&["--headless", "--hide-scrollbars", "--mute-audio"]);
        }
        if self.incognito {
            cmd.arg("--incognito");
        }
        if let Some(ref envs) = self.process_envs {
            cmd.envs(envs);
        }
        // stderr must be piped: the websocket url is printed there.
        cmd.stderr(Stdio::piped()).spawn()
    }
}
/// Returns the path to Chrome's executable.
///
/// If the `CHROME` environment variable is set, `default_executable` will
/// use it as the default path. Otherwise, the filenames `google-chrome-stable`
/// `chromium`, `chromium-browser`, `chrome` and `chrome-browser` are
/// searched for in standard places. If that fails,
/// `/Applications/Google Chrome.app/...` (on MacOS) or the registry (on
/// Windows) is consulted. If all of the above fail, an error is returned.
pub fn default_executable() -> Result<std::path::PathBuf, String> {
    // 1. Explicit override via the CHROME environment variable.
    if let Ok(path) = std::env::var("CHROME") {
        if std::path::Path::new(&path).exists() {
            return Ok(path.into());
        }
    }
    // 2. Well-known binary names on PATH, in order of preference.
    for app in &[
        "google-chrome-stable",
        "chromium",
        "chromium-browser",
        "chrome",
        "chrome-browser",
    ] {
        if let Ok(path) = which::which(app) {
            return Ok(path);
        }
    }
    // 3. macOS: the standard application bundle location.
    #[cfg(target_os = "macos")]
    {
        let default_paths = &["/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"][..];
        for path in default_paths {
            if std::path::Path::new(path).exists() {
                return Ok(path.into());
            }
        }
    }
    // 4. Windows: the App Paths registry entry for chrome.exe.
    #[cfg(windows)]
    {
        if let Some(path) = get_chrome_path_from_windows_registry() {
            if path.exists() {
                return Ok(path);
            }
        }
    }
    Err("Could not auto detect a chrome executable".to_string())
}
#[cfg(windows)]
pub(crate) fn get_chrome_path_from_windows_registry() -> Option<std::path::PathBuf> {
winreg::RegKey::predef(winreg::enums::HKEY_LOCAL_MACHINE)
.open_subkey("SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\chrome.exe")
|
self.args.push(arg.into());
|
random_line_split
|
browser.rs
|
_response, CommandMessage};
use crate::conn::Connection;
use crate::error::{CdpError, Result};
use crate::handler::browser::BrowserContext;
use crate::handler::viewport::Viewport;
use crate::handler::{Handler, HandlerConfig, HandlerMessage, REQUEST_TIMEOUT};
use crate::listeners::{EventListenerRequest, EventStream};
use crate::page::Page;
use chromiumoxide_cdp::cdp::browser_protocol::browser::{GetVersionParams, GetVersionReturns};
/// A [`Browser`] is created when chromiumoxide connects to a Chromium instance.
#[derive(Debug)]
pub struct Browser {
/// The `Sender` to send messages to the connection handler that drives the
/// websocket
sender: Sender<HandlerMessage>,
/// How the spawned chromium instance was configured, if any
config: Option<BrowserConfig>,
/// The spawned chromium instance
child: Option<Child>,
/// The debug web socket url of the chromium instance
debug_ws_url: String,
/// The context of the browser
browser_context: BrowserContext,
}
impl Browser {
/// Connect to an already running chromium instance via websocket
pub async fn connect(debug_ws_url: impl Into<String>) -> Result<(Self, Handler)> {
let debug_ws_url = debug_ws_url.into();
let conn = Connection::<CdpEventMessage>::connect(&debug_ws_url).await?;
let (tx, rx) = channel(1);
let fut = Handler::new(conn, rx, HandlerConfig::default());
let browser_context = fut.default_browser_context().clone();
let browser = Self {
sender: tx,
config: None,
child: None,
debug_ws_url,
browser_context,
};
Ok((browser, fut))
}
/// Launches a new instance of `chromium` in the background and attaches to
/// its debug web socket.
///
/// This fails when no chromium executable could be detected.
///
/// This fails if no web socket url could be detected from the child
/// processes stderr for more than 20 seconds.
pub async fn launch(config: BrowserConfig) -> Result<(Self, Handler)> {
// launch a new chromium instance
let mut child = config.launch()?;
// extract the ws:
let get_ws_url = ws_url_from_output(&mut child);
let dur = Duration::from_secs(20);
cfg_if::cfg_if! {
if #[cfg(feature = "async-std-runtime")] {
let debug_ws_url = async_std::future::timeout(dur, get_ws_url)
.await
.map_err(|_| CdpError::Timeout)?;
} else if #[cfg(feature = "tokio-runtime")] {
let debug_ws_url = tokio::time::timeout(dur, get_ws_url).await
.map_err(|_| CdpError::Timeout)?;
}
}
let conn = Connection::<CdpEventMessage>::connect(&debug_ws_url).await?;
let (tx, rx) = channel(1);
let handler_config = HandlerConfig {
ignore_https_errors: config.ignore_https_errors,
viewport: Some(config.viewport.clone()),
context_ids: Vec::new(),
request_timeout: config.request_timeout,
};
let fut = Handler::new(conn, rx, handler_config);
let browser_context = fut.default_browser_context().clone();
let browser = Self {
sender: tx,
config: Some(config),
child: Some(child),
debug_ws_url,
browser_context,
};
Ok((browser, fut))
}
/// If not launched as incognito this creates a new incognito browser
/// context. After that this browser exists within the incognito session.
/// New pages created while being in incognito mode will also run in the
/// incognito context. Incognito contexts won't share cookies/cache with
/// other browser contexts.
pub async fn start_incognito_context(&mut self) -> Result<&mut Self> {
if!self.is_incognito_configured() {
let resp = self
.execute(CreateBrowserContextParams::default())
.await?
.result;
self.browser_context = BrowserContext::from(resp.browser_context_id);
self.sender
.clone()
.send(HandlerMessage::InsertContext(self.browser_context.clone()))
.await?;
}
Ok(self)
}
/// If a incognito session was created with
/// `Browser::start_incognito_context` this disposes this context.
///
/// # Note This will also dispose all pages that were running within the
/// incognito context.
pub async fn quit_incognito_context(&mut self) -> Result<&mut Self> {
if let Some(id) = self.browser_context.take() {
self.execute(DisposeBrowserContextParams::new(id.clone()))
.await?;
self.sender
.clone()
.send(HandlerMessage::DisposeContext(BrowserContext::from(id)))
.await?;
}
Ok(self)
}
/// Whether incognito mode was configured from the start
fn is_incognito_configured(&self) -> bool {
self.config
.as_ref()
.map(|c| c.incognito)
.unwrap_or_default()
}
/// Returns the address of the websocket this browser is attached to
pub fn websocket_address(&self) -> &String {
&self.debug_ws_url
}
/// Whether the BrowserContext is incognito.
pub fn is_incognito(&self) -> bool {
self.is_incognito_configured() || self.browser_context.is_incognito()
}
/// The config of the spawned chromium instance if any.
pub fn config(&self) -> Option<&BrowserConfig> {
self.config.as_ref()
}
/// Create a new browser page
pub async fn new_page(&self, params: impl Into<CreateTargetParams>) -> Result<Page> {
let (tx, rx) = oneshot_channel();
let mut params = params.into();
if let Some(id) = self.browser_context.id() {
if params.browser_context_id.is_none() {
params.browser_context_id = Some(id.clone());
}
}
self.sender
.clone()
.send(HandlerMessage::CreatePage(params, tx))
.await?;
rx.await?
}
/// Version information about the browser
pub async fn version(&self) -> Result<GetVersionReturns> {
Ok(self.execute(GetVersionParams::default()).await?.result)
}
/// Returns the user agent of the browser
pub async fn user_agent(&self) -> Result<String> {
Ok(self.version().await?.user_agent)
}
/// Call a browser method.
pub async fn execute<T: Command>(&self, cmd: T) -> Result<CommandResponse<T::Response>> {
let (tx, rx) = oneshot_channel();
let method = cmd.identifier();
let msg = CommandMessage::new(cmd, tx)?;
self.sender
.clone()
.send(HandlerMessage::Command(msg))
.await?;
let resp = rx.await??;
to_command_response::<T>(resp, method)
}
/// Return all of the pages of the browser
pub async fn pages(&self) -> Result<Vec<Page>> {
let (tx, rx) = oneshot_channel();
self.sender
.clone()
.send(HandlerMessage::GetPages(tx))
.await?;
Ok(rx.await?)
}
/// Return page of given target_id
pub async fn get_page(&self, target_id: TargetId) -> Result<Page> {
let (tx, rx) = oneshot_channel();
self.sender
.clone()
.send(HandlerMessage::GetPage(target_id, tx))
.await?;
rx.await?.ok_or(CdpError::NotFound)
}
//Set listener for browser event
pub async fn event_listener<T: IntoEventKind>(&self) -> Result<EventStream<T>> {
let (tx, rx) = unbounded();
self.sender
.clone()
.send(HandlerMessage::AddEventListener(
EventListenerRequest::new::<T>(tx),
))
.await?;
Ok(EventStream::new(rx))
}
}
impl Drop for Browser {
fn drop(&mut self) {
if let Some(child) = self.child.as_mut() {
child.kill().expect("!kill");
}
}
}
async fn ws_url_from_output(child_process: &mut Child) -> String {
let stdout = child_process.stderr.take().expect("no stderror");
fn read_debug_url(stdout: std::process::ChildStderr) -> String {
let mut buf = BufReader::new(stdout);
let mut line = String::new();
loop {
if buf.read_line(&mut line).is_ok() {
// check for ws in line
if let Some(ws) = line.rsplit("listening on ").next() {
if ws.starts_with("ws") && ws.contains("devtools/browser")
|
}
} else {
line = String::new();
}
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "async-std-runtime")] {
async_std::task::spawn_blocking(|| read_debug_url(stdout)).await
} else if #[cfg(feature = "tokio-runtime")] {
tokio::task::spawn_blocking(move || read_debug_url(stdout)).await.expect("Failed to read debug url from process output")
}
}
}
#[derive(Debug, Clone)]
pub struct BrowserConfig {
/// Determines whether to run headless version of the browser. Defaults to
/// true.
headless: bool,
/// Determines whether to run the browser with a sandbox.
sandbox: bool,
/// Launch the browser with a specific window width and height.
window_size: Option<(u32, u32)>,
/// Launch the browser with a specific debugging port.
port: u16,
/// Path for Chrome or Chromium.
///
/// If unspecified, the create will try to automatically detect a suitable
/// binary.
executable: std::path::PathBuf,
/// A list of Chrome extensions to load.
///
/// An extension should be a path to a folder containing the extension code.
/// CRX files cannot be used directly and must be first extracted.
///
/// Note that Chrome does not support loading extensions in headless-mode.
/// See https://bugs.chromium.org/p/chromium/issues/detail?id=706008#c5
extensions: Vec<String>,
/// Environment variables to set for the Chromium process.
/// Passes value through to std::process::Command::envs.
pub process_envs: Option<HashMap<String, String>>,
/// Data dir for user data
pub user_data_dir: Option<PathBuf>,
/// Whether to launch the `Browser` in incognito mode
incognito: bool,
/// Ignore https errors, default is true
ignore_https_errors: bool,
viewport: Viewport,
/// The duration after a request with no response should time out
request_timeout: Duration,
/// Additional command line arguments to pass to the browser instance.
args: Vec<String>,
}
#[derive(Debug, Clone)]
pub struct BrowserConfigBuilder {
headless: bool,
sandbox: bool,
window_size: Option<(u32, u32)>,
port: u16,
executable: Option<PathBuf>,
extensions: Vec<String>,
process_envs: Option<HashMap<String, String>>,
user_data_dir: Option<PathBuf>,
incognito: bool,
ignore_https_errors: bool,
viewport: Viewport,
request_timeout: Duration,
args: Vec<String>,
}
impl BrowserConfig {
pub fn builder() -> BrowserConfigBuilder {
BrowserConfigBuilder::default()
}
pub fn with_executable(path: impl AsRef<Path>) -> Self {
Self::builder().chrome_executable(path).build().unwrap()
}
}
impl Default for BrowserConfigBuilder {
fn default() -> Self {
Self {
headless: true,
sandbox: true,
window_size: None,
port: 0,
executable: None,
extensions: Vec::new(),
process_envs: None,
user_data_dir: None,
incognito: false,
ignore_https_errors: true,
viewport: Default::default(),
request_timeout: Duration::from_millis(REQUEST_TIMEOUT),
args: Vec::new(),
}
}
}
impl BrowserConfigBuilder {
pub fn window_size(mut self, width: u32, height: u32) -> Self {
self.window_size = Some((width, height));
self
}
pub fn no_sandbox(mut self) -> Self {
self.sandbox = false;
self
}
pub fn with_head(mut self) -> Self {
self.headless = false;
self
}
pub fn incognito(mut self) -> Self {
self.incognito = true;
self
}
pub fn respect_https_errors(mut self) -> Self {
self.ignore_https_errors = false;
self
}
pub fn request_timeout(mut self, timeout: Duration) -> Self {
self.request_timeout = timeout;
self
}
pub fn viewport(mut self, viewport: Viewport) -> Self {
self.viewport = viewport;
self
}
pub fn user_data_dir(mut self, data_dir: impl AsRef<Path>) -> Self {
self.user_data_dir = Some(data_dir.as_ref().to_path_buf());
self
}
pub fn chrome_executable(mut self, path: impl AsRef<Path>) -> Self {
self.executable = Some(path.as_ref().to_path_buf());
self
}
pub fn extension(mut self, extension: impl Into<String>) -> Self {
self.extensions.push(extension.into());
self
}
pub fn extensions<I, S>(mut self, extensions: I) -> Self
where
I: IntoIterator<Item = S>,
S: Into<String>,
{
for ext in extensions {
self.extensions.push(ext.into());
}
self
}
pub fn env(mut self, key: impl Into<String>, val: impl Into<String>) -> Self {
self.process_envs
.get_or_insert(HashMap::new())
.insert(key.into(), val.into());
self
}
pub fn envs<I, K, V>(mut self, envs: I) -> Self
where
I: IntoIterator<Item = (K, V)>,
K: Into<String>,
V: Into<String>,
{
self.process_envs
.get_or_insert(HashMap::new())
.extend(envs.into_iter().map(|(k, v)| (k.into(), v.into())));
self
}
pub fn arg(mut self, arg: impl Into<String>) -> Self {
self.args.push(arg.into());
self
}
pub fn args<I, S>(mut self, args: I) -> Self
where
I: IntoIterator<Item = S>,
S: Into<String>,
{
for arg in args {
self.args.push(arg.into());
}
self
}
pub fn build(self) -> std::result::Result<BrowserConfig, String> {
let executable = if let Some(e) = self.executable {
e
} else {
default_executable()?
};
Ok(BrowserConfig {
headless: self.headless,
sandbox: self.sandbox,
window_size: self.window_size,
port: self.port,
executable,
extensions: self.extensions,
process_envs: self.process_envs,
user_data_dir: self.user_data_dir,
incognito: self.incognito,
ignore_https_errors: self.ignore_https_errors,
viewport: self.viewport,
request_timeout: self.request_timeout,
args: self.args,
})
}
}
impl BrowserConfig {
pub fn launch(&self) -> io::Result<Child> {
let dbg_port = format!("--remote-debugging-port={}", self.port);
let args = [
dbg_port.as_str(),
"--disable-background-networking",
"--enable-features=NetworkService,NetworkServiceInProcess",
"--disable-background-timer-throttling",
"--disable-backgrounding-occluded-windows",
"--disable-breakpad",
"--disable-client-side-phishing-detection",
"--disable-component-extensions-with-background-pages",
"--disable-default-apps",
"--disable-dev-shm-usage",
"--disable-extensions",
"--disable-features=TranslateUI",
"--disable-hang-monitor",
"--disable-ipc-flooding-protection",
"--disable-popup-blocking",
"--disable-prompt-on-repost",
"--disable-renderer-backgrounding",
"--disable-sync",
"--force-color-profile=srgb",
"--metrics-recording-only",
"--no-first-run",
"--enable-automation",
"--password-store=basic",
"--use-mock-keychain",
"--enable-blink-features=IdleDetection",
];
let mut cmd = process::Command::new(&self.executable);
cmd.args(&args).args(&DEFAULT_ARGS).args(&self.args).args(
self.extensions
.iter()
.map(|e| format!("--load-extension={}", e)),
);
if let Some(ref user_data) = self.user_data_dir {
cmd.arg(format!("--user-data-dir={}", user_data.display()));
}
if let Some((width, height)) = self.window_size {
cmd.arg(format!("--window-size={},{}", width, height));
}
if!self.sandbox {
cmd.args(&["--no-sandbox", "--disable-setuid-sandbox"]);
}
if self.headless {
cmd.args(&["--headless", "--hide-scrollbars", "--mute-audio"]);
}
if self.incognito {
cmd.arg("--incognito");
}
if let Some(ref envs) = self.process_envs {
cmd.envs(envs);
}
cmd.stderr(Stdio::piped()).spawn()
}
}
/// Returns the path to Chrome's executable.
///
/// If the `CHROME` environment variable is set, `default_executable` will
/// use it as the default path. Otherwise, the filenames `google-chrome-stable`
/// `chromium`, `chromium-browser`, `chrome` and `chrome-browser` are
/// searched for in standard places. If that fails,
/// `/Applications/Google Chrome.app/...` (on MacOS) or the registry (on
/// Windows) is consulted. If all of the above fail, an error is returned.
pub fn default_executable() -> Result<std::path::PathBuf, String> {
if let Ok(path) = std::env::var("CHROME") {
if std::path::Path::new(&path).exists() {
return Ok(path.into());
}
}
for app in &[
"google-chrome-stable",
"chromium",
"chromium-browser",
"chrome",
"chrome-browser",
] {
if let Ok(path) = which::which(app) {
return Ok(path);
}
}
#[cfg(target_os = "macos")]
{
let default_paths = &["/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"][..];
for path in default_paths {
if std::path::Path::new(path).exists() {
return Ok(path.into());
}
}
}
#[cfg(windows)]
{
if let Some(path) = get_chrome_path_from_windows_registry() {
if path.exists() {
return Ok(path);
}
}
}
Err("Could not auto detect a chrome executable".to_string())
}
#[cfg(windows)]
pub(crate) fn get_chrome_path_from_windows_registry() -> Option<std::path::PathBuf> {
winreg::RegKey::predef(winreg::enums::HKEY_LOCAL_MACHINE)
.open_subkey("SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\chrome.exe")
|
{
return ws.trim().to_string();
}
|
conditional_block
|
browser.rs
|
_response, CommandMessage};
use crate::conn::Connection;
use crate::error::{CdpError, Result};
use crate::handler::browser::BrowserContext;
use crate::handler::viewport::Viewport;
use crate::handler::{Handler, HandlerConfig, HandlerMessage, REQUEST_TIMEOUT};
use crate::listeners::{EventListenerRequest, EventStream};
use crate::page::Page;
use chromiumoxide_cdp::cdp::browser_protocol::browser::{GetVersionParams, GetVersionReturns};
/// A [`Browser`] is created when chromiumoxide connects to a Chromium instance.
#[derive(Debug)]
pub struct Browser {
/// The `Sender` to send messages to the connection handler that drives the
/// websocket
sender: Sender<HandlerMessage>,
/// How the spawned chromium instance was configured, if any
config: Option<BrowserConfig>,
/// The spawned chromium instance
child: Option<Child>,
/// The debug web socket url of the chromium instance
debug_ws_url: String,
/// The context of the browser
browser_context: BrowserContext,
}
impl Browser {
/// Connect to an already running chromium instance via websocket
pub async fn connect(debug_ws_url: impl Into<String>) -> Result<(Self, Handler)> {
let debug_ws_url = debug_ws_url.into();
let conn = Connection::<CdpEventMessage>::connect(&debug_ws_url).await?;
let (tx, rx) = channel(1);
let fut = Handler::new(conn, rx, HandlerConfig::default());
let browser_context = fut.default_browser_context().clone();
let browser = Self {
sender: tx,
config: None,
child: None,
debug_ws_url,
browser_context,
};
Ok((browser, fut))
}
/// Launches a new instance of `chromium` in the background and attaches to
/// its debug web socket.
///
/// This fails when no chromium executable could be detected.
///
/// This fails if no web socket url could be detected from the child
/// processes stderr for more than 20 seconds.
pub async fn launch(config: BrowserConfig) -> Result<(Self, Handler)> {
// launch a new chromium instance
let mut child = config.launch()?;
// extract the ws:
let get_ws_url = ws_url_from_output(&mut child);
let dur = Duration::from_secs(20);
cfg_if::cfg_if! {
if #[cfg(feature = "async-std-runtime")] {
let debug_ws_url = async_std::future::timeout(dur, get_ws_url)
.await
.map_err(|_| CdpError::Timeout)?;
} else if #[cfg(feature = "tokio-runtime")] {
let debug_ws_url = tokio::time::timeout(dur, get_ws_url).await
.map_err(|_| CdpError::Timeout)?;
}
}
let conn = Connection::<CdpEventMessage>::connect(&debug_ws_url).await?;
let (tx, rx) = channel(1);
let handler_config = HandlerConfig {
ignore_https_errors: config.ignore_https_errors,
viewport: Some(config.viewport.clone()),
context_ids: Vec::new(),
request_timeout: config.request_timeout,
};
let fut = Handler::new(conn, rx, handler_config);
let browser_context = fut.default_browser_context().clone();
let browser = Self {
sender: tx,
config: Some(config),
child: Some(child),
debug_ws_url,
browser_context,
};
Ok((browser, fut))
}
/// If not launched as incognito this creates a new incognito browser
/// context. After that this browser exists within the incognito session.
/// New pages created while being in incognito mode will also run in the
/// incognito context. Incognito contexts won't share cookies/cache with
/// other browser contexts.
pub async fn start_incognito_context(&mut self) -> Result<&mut Self> {
if!self.is_incognito_configured() {
let resp = self
.execute(CreateBrowserContextParams::default())
.await?
.result;
self.browser_context = BrowserContext::from(resp.browser_context_id);
self.sender
.clone()
.send(HandlerMessage::InsertContext(self.browser_context.clone()))
.await?;
}
Ok(self)
}
/// If a incognito session was created with
/// `Browser::start_incognito_context` this disposes this context.
///
/// # Note This will also dispose all pages that were running within the
/// incognito context.
pub async fn quit_incognito_context(&mut self) -> Result<&mut Self> {
if let Some(id) = self.browser_context.take() {
self.execute(DisposeBrowserContextParams::new(id.clone()))
.await?;
self.sender
.clone()
.send(HandlerMessage::DisposeContext(BrowserContext::from(id)))
.await?;
}
Ok(self)
}
/// Whether incognito mode was configured from the start
fn is_incognito_configured(&self) -> bool {
self.config
.as_ref()
.map(|c| c.incognito)
.unwrap_or_default()
}
/// Returns the address of the websocket this browser is attached to
pub fn websocket_address(&self) -> &String {
&self.debug_ws_url
}
/// Whether the BrowserContext is incognito.
pub fn is_incognito(&self) -> bool {
self.is_incognito_configured() || self.browser_context.is_incognito()
}
/// The config of the spawned chromium instance if any.
pub fn config(&self) -> Option<&BrowserConfig> {
self.config.as_ref()
}
/// Create a new browser page
pub async fn new_page(&self, params: impl Into<CreateTargetParams>) -> Result<Page> {
let (tx, rx) = oneshot_channel();
let mut params = params.into();
if let Some(id) = self.browser_context.id() {
if params.browser_context_id.is_none() {
params.browser_context_id = Some(id.clone());
}
}
self.sender
.clone()
.send(HandlerMessage::CreatePage(params, tx))
.await?;
rx.await?
}
/// Version information about the browser
pub async fn version(&self) -> Result<GetVersionReturns> {
Ok(self.execute(GetVersionParams::default()).await?.result)
}
/// Returns the user agent of the browser
pub async fn user_agent(&self) -> Result<String> {
Ok(self.version().await?.user_agent)
}
/// Call a browser method.
pub async fn execute<T: Command>(&self, cmd: T) -> Result<CommandResponse<T::Response>> {
let (tx, rx) = oneshot_channel();
let method = cmd.identifier();
let msg = CommandMessage::new(cmd, tx)?;
self.sender
.clone()
.send(HandlerMessage::Command(msg))
.await?;
let resp = rx.await??;
to_command_response::<T>(resp, method)
}
/// Return all of the pages of the browser
pub async fn pages(&self) -> Result<Vec<Page>> {
let (tx, rx) = oneshot_channel();
self.sender
.clone()
.send(HandlerMessage::GetPages(tx))
.await?;
Ok(rx.await?)
}
/// Return page of given target_id
pub async fn get_page(&self, target_id: TargetId) -> Result<Page> {
let (tx, rx) = oneshot_channel();
self.sender
.clone()
.send(HandlerMessage::GetPage(target_id, tx))
.await?;
rx.await?.ok_or(CdpError::NotFound)
}
//Set listener for browser event
pub async fn event_listener<T: IntoEventKind>(&self) -> Result<EventStream<T>> {
let (tx, rx) = unbounded();
self.sender
.clone()
.send(HandlerMessage::AddEventListener(
EventListenerRequest::new::<T>(tx),
))
.await?;
Ok(EventStream::new(rx))
}
}
impl Drop for Browser {
fn drop(&mut self) {
if let Some(child) = self.child.as_mut() {
child.kill().expect("!kill");
}
}
}
async fn ws_url_from_output(child_process: &mut Child) -> String {
let stdout = child_process.stderr.take().expect("no stderror");
fn read_debug_url(stdout: std::process::ChildStderr) -> String {
let mut buf = BufReader::new(stdout);
let mut line = String::new();
loop {
if buf.read_line(&mut line).is_ok() {
// check for ws in line
if let Some(ws) = line.rsplit("listening on ").next() {
if ws.starts_with("ws") && ws.contains("devtools/browser") {
return ws.trim().to_string();
}
}
} else {
line = String::new();
}
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "async-std-runtime")] {
async_std::task::spawn_blocking(|| read_debug_url(stdout)).await
} else if #[cfg(feature = "tokio-runtime")] {
tokio::task::spawn_blocking(move || read_debug_url(stdout)).await.expect("Failed to read debug url from process output")
}
}
}
#[derive(Debug, Clone)]
pub struct BrowserConfig {
/// Determines whether to run headless version of the browser. Defaults to
/// true.
headless: bool,
/// Determines whether to run the browser with a sandbox.
sandbox: bool,
/// Launch the browser with a specific window width and height.
window_size: Option<(u32, u32)>,
/// Launch the browser with a specific debugging port.
port: u16,
/// Path for Chrome or Chromium.
///
/// If unspecified, the create will try to automatically detect a suitable
/// binary.
executable: std::path::PathBuf,
/// A list of Chrome extensions to load.
///
/// An extension should be a path to a folder containing the extension code.
/// CRX files cannot be used directly and must be first extracted.
///
/// Note that Chrome does not support loading extensions in headless-mode.
/// See https://bugs.chromium.org/p/chromium/issues/detail?id=706008#c5
extensions: Vec<String>,
/// Environment variables to set for the Chromium process.
/// Passes value through to std::process::Command::envs.
pub process_envs: Option<HashMap<String, String>>,
/// Data dir for user data
pub user_data_dir: Option<PathBuf>,
/// Whether to launch the `Browser` in incognito mode
incognito: bool,
/// Ignore https errors, default is true
ignore_https_errors: bool,
viewport: Viewport,
/// The duration after a request with no response should time out
request_timeout: Duration,
/// Additional command line arguments to pass to the browser instance.
args: Vec<String>,
}
#[derive(Debug, Clone)]
pub struct BrowserConfigBuilder {
headless: bool,
sandbox: bool,
window_size: Option<(u32, u32)>,
port: u16,
executable: Option<PathBuf>,
extensions: Vec<String>,
process_envs: Option<HashMap<String, String>>,
user_data_dir: Option<PathBuf>,
incognito: bool,
ignore_https_errors: bool,
viewport: Viewport,
request_timeout: Duration,
args: Vec<String>,
}
impl BrowserConfig {
pub fn builder() -> BrowserConfigBuilder {
BrowserConfigBuilder::default()
}
pub fn with_executable(path: impl AsRef<Path>) -> Self {
Self::builder().chrome_executable(path).build().unwrap()
}
}
impl Default for BrowserConfigBuilder {
fn default() -> Self {
Self {
headless: true,
sandbox: true,
window_size: None,
port: 0,
executable: None,
extensions: Vec::new(),
process_envs: None,
user_data_dir: None,
incognito: false,
ignore_https_errors: true,
viewport: Default::default(),
request_timeout: Duration::from_millis(REQUEST_TIMEOUT),
args: Vec::new(),
}
}
}
impl BrowserConfigBuilder {
pub fn window_size(mut self, width: u32, height: u32) -> Self {
self.window_size = Some((width, height));
self
}
pub fn no_sandbox(mut self) -> Self {
self.sandbox = false;
self
}
pub fn with_head(mut self) -> Self {
self.headless = false;
self
}
pub fn incognito(mut self) -> Self {
self.incognito = true;
self
}
pub fn respect_https_errors(mut self) -> Self {
self.ignore_https_errors = false;
self
}
pub fn request_timeout(mut self, timeout: Duration) -> Self {
self.request_timeout = timeout;
self
}
pub fn viewport(mut self, viewport: Viewport) -> Self {
self.viewport = viewport;
self
}
pub fn user_data_dir(mut self, data_dir: impl AsRef<Path>) -> Self {
self.user_data_dir = Some(data_dir.as_ref().to_path_buf());
self
}
pub fn chrome_executable(mut self, path: impl AsRef<Path>) -> Self {
self.executable = Some(path.as_ref().to_path_buf());
self
}
pub fn extension(mut self, extension: impl Into<String>) -> Self {
self.extensions.push(extension.into());
self
}
pub fn extensions<I, S>(mut self, extensions: I) -> Self
where
I: IntoIterator<Item = S>,
S: Into<String>,
{
for ext in extensions {
self.extensions.push(ext.into());
}
self
}
pub fn env(mut self, key: impl Into<String>, val: impl Into<String>) -> Self {
self.process_envs
.get_or_insert(HashMap::new())
.insert(key.into(), val.into());
self
}
pub fn envs<I, K, V>(mut self, envs: I) -> Self
where
I: IntoIterator<Item = (K, V)>,
K: Into<String>,
V: Into<String>,
{
self.process_envs
.get_or_insert(HashMap::new())
.extend(envs.into_iter().map(|(k, v)| (k.into(), v.into())));
self
}
pub fn arg(mut self, arg: impl Into<String>) -> Self {
self.args.push(arg.into());
self
}
pub fn args<I, S>(mut self, args: I) -> Self
where
I: IntoIterator<Item = S>,
S: Into<String>,
{
for arg in args {
self.args.push(arg.into());
}
self
}
pub fn build(self) -> std::result::Result<BrowserConfig, String> {
let executable = if let Some(e) = self.executable {
e
} else {
default_executable()?
};
Ok(BrowserConfig {
headless: self.headless,
sandbox: self.sandbox,
window_size: self.window_size,
port: self.port,
executable,
extensions: self.extensions,
process_envs: self.process_envs,
user_data_dir: self.user_data_dir,
incognito: self.incognito,
ignore_https_errors: self.ignore_https_errors,
viewport: self.viewport,
request_timeout: self.request_timeout,
args: self.args,
})
}
}
impl BrowserConfig {
pub fn launch(&self) -> io::Result<Child> {
let dbg_port = format!("--remote-debugging-port={}", self.port);
let args = [
dbg_port.as_str(),
"--disable-background-networking",
"--enable-features=NetworkService,NetworkServiceInProcess",
"--disable-background-timer-throttling",
"--disable-backgrounding-occluded-windows",
"--disable-breakpad",
"--disable-client-side-phishing-detection",
"--disable-component-extensions-with-background-pages",
"--disable-default-apps",
"--disable-dev-shm-usage",
"--disable-extensions",
"--disable-features=TranslateUI",
"--disable-hang-monitor",
"--disable-ipc-flooding-protection",
"--disable-popup-blocking",
"--disable-prompt-on-repost",
"--disable-renderer-backgrounding",
"--disable-sync",
"--force-color-profile=srgb",
"--metrics-recording-only",
"--no-first-run",
"--enable-automation",
"--password-store=basic",
"--use-mock-keychain",
"--enable-blink-features=IdleDetection",
];
let mut cmd = process::Command::new(&self.executable);
cmd.args(&args).args(&DEFAULT_ARGS).args(&self.args).args(
self.extensions
.iter()
.map(|e| format!("--load-extension={}", e)),
);
if let Some(ref user_data) = self.user_data_dir {
cmd.arg(format!("--user-data-dir={}", user_data.display()));
}
if let Some((width, height)) = self.window_size {
cmd.arg(format!("--window-size={},{}", width, height));
}
if!self.sandbox {
cmd.args(&["--no-sandbox", "--disable-setuid-sandbox"]);
}
if self.headless {
cmd.args(&["--headless", "--hide-scrollbars", "--mute-audio"]);
}
if self.incognito {
cmd.arg("--incognito");
}
if let Some(ref envs) = self.process_envs {
cmd.envs(envs);
}
cmd.stderr(Stdio::piped()).spawn()
}
}
/// Returns the path to Chrome's executable.
///
/// If the `CHROME` environment variable is set, `default_executable` will
/// use it as the default path. Otherwise, the filenames `google-chrome-stable`
/// `chromium`, `chromium-browser`, `chrome` and `chrome-browser` are
/// searched for in standard places. If that fails,
/// `/Applications/Google Chrome.app/...` (on MacOS) or the registry (on
/// Windows) is consulted. If all of the above fail, an error is returned.
pub fn default_executable() -> Result<std::path::PathBuf, String>
|
{
let default_paths = &["/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"][..];
for path in default_paths {
if std::path::Path::new(path).exists() {
return Ok(path.into());
}
}
}
#[cfg(windows)]
{
if let Some(path) = get_chrome_path_from_windows_registry() {
if path.exists() {
return Ok(path);
}
}
}
Err("Could not auto detect a chrome executable".to_string())
}
#[cfg(windows)]
pub(crate) fn get_chrome_path_from_windows_registry() -> Option<std::path::PathBuf> {
winreg::RegKey::predef(winreg::enums::HKEY_LOCAL_MACHINE)
.open_subkey("SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\chrome.exe")
|
{
if let Ok(path) = std::env::var("CHROME") {
if std::path::Path::new(&path).exists() {
return Ok(path.into());
}
}
for app in &[
"google-chrome-stable",
"chromium",
"chromium-browser",
"chrome",
"chrome-browser",
] {
if let Ok(path) = which::which(app) {
return Ok(path);
}
}
#[cfg(target_os = "macos")]
|
identifier_body
|
browser.rs
|
_response, CommandMessage};
use crate::conn::Connection;
use crate::error::{CdpError, Result};
use crate::handler::browser::BrowserContext;
use crate::handler::viewport::Viewport;
use crate::handler::{Handler, HandlerConfig, HandlerMessage, REQUEST_TIMEOUT};
use crate::listeners::{EventListenerRequest, EventStream};
use crate::page::Page;
use chromiumoxide_cdp::cdp::browser_protocol::browser::{GetVersionParams, GetVersionReturns};
/// A [`Browser`] is created when chromiumoxide connects to a Chromium instance.
#[derive(Debug)]
pub struct Browser {
/// The `Sender` to send messages to the connection handler that drives the
/// websocket
sender: Sender<HandlerMessage>,
/// How the spawned chromium instance was configured, if any
config: Option<BrowserConfig>,
/// The spawned chromium instance
child: Option<Child>,
/// The debug web socket url of the chromium instance
debug_ws_url: String,
/// The context of the browser
browser_context: BrowserContext,
}
impl Browser {
/// Connect to an already running chromium instance via websocket
pub async fn connect(debug_ws_url: impl Into<String>) -> Result<(Self, Handler)> {
let debug_ws_url = debug_ws_url.into();
let conn = Connection::<CdpEventMessage>::connect(&debug_ws_url).await?;
let (tx, rx) = channel(1);
let fut = Handler::new(conn, rx, HandlerConfig::default());
let browser_context = fut.default_browser_context().clone();
let browser = Self {
sender: tx,
config: None,
child: None,
debug_ws_url,
browser_context,
};
Ok((browser, fut))
}
/// Launches a new instance of `chromium` in the background and attaches to
/// its debug web socket.
///
/// This fails when no chromium executable could be detected.
///
/// This fails if no web socket url could be detected from the child
/// processes stderr for more than 20 seconds.
pub async fn
|
(config: BrowserConfig) -> Result<(Self, Handler)> {
// launch a new chromium instance
let mut child = config.launch()?;
// extract the ws:
let get_ws_url = ws_url_from_output(&mut child);
let dur = Duration::from_secs(20);
cfg_if::cfg_if! {
if #[cfg(feature = "async-std-runtime")] {
let debug_ws_url = async_std::future::timeout(dur, get_ws_url)
.await
.map_err(|_| CdpError::Timeout)?;
} else if #[cfg(feature = "tokio-runtime")] {
let debug_ws_url = tokio::time::timeout(dur, get_ws_url).await
.map_err(|_| CdpError::Timeout)?;
}
}
let conn = Connection::<CdpEventMessage>::connect(&debug_ws_url).await?;
let (tx, rx) = channel(1);
let handler_config = HandlerConfig {
ignore_https_errors: config.ignore_https_errors,
viewport: Some(config.viewport.clone()),
context_ids: Vec::new(),
request_timeout: config.request_timeout,
};
let fut = Handler::new(conn, rx, handler_config);
let browser_context = fut.default_browser_context().clone();
let browser = Self {
sender: tx,
config: Some(config),
child: Some(child),
debug_ws_url,
browser_context,
};
Ok((browser, fut))
}
/// If not launched as incognito this creates a new incognito browser
/// context. After that this browser exists within the incognito session.
/// New pages created while being in incognito mode will also run in the
/// incognito context. Incognito contexts won't share cookies/cache with
/// other browser contexts.
pub async fn start_incognito_context(&mut self) -> Result<&mut Self> {
if!self.is_incognito_configured() {
let resp = self
.execute(CreateBrowserContextParams::default())
.await?
.result;
self.browser_context = BrowserContext::from(resp.browser_context_id);
self.sender
.clone()
.send(HandlerMessage::InsertContext(self.browser_context.clone()))
.await?;
}
Ok(self)
}
/// If a incognito session was created with
/// `Browser::start_incognito_context` this disposes this context.
///
/// # Note This will also dispose all pages that were running within the
/// incognito context.
pub async fn quit_incognito_context(&mut self) -> Result<&mut Self> {
if let Some(id) = self.browser_context.take() {
self.execute(DisposeBrowserContextParams::new(id.clone()))
.await?;
self.sender
.clone()
.send(HandlerMessage::DisposeContext(BrowserContext::from(id)))
.await?;
}
Ok(self)
}
/// Whether incognito mode was configured from the start
fn is_incognito_configured(&self) -> bool {
self.config
.as_ref()
.map(|c| c.incognito)
.unwrap_or_default()
}
/// Returns the address of the websocket this browser is attached to
pub fn websocket_address(&self) -> &String {
&self.debug_ws_url
}
/// Whether the BrowserContext is incognito.
pub fn is_incognito(&self) -> bool {
self.is_incognito_configured() || self.browser_context.is_incognito()
}
/// The config of the spawned chromium instance if any.
pub fn config(&self) -> Option<&BrowserConfig> {
self.config.as_ref()
}
/// Create a new browser page
pub async fn new_page(&self, params: impl Into<CreateTargetParams>) -> Result<Page> {
let (tx, rx) = oneshot_channel();
let mut params = params.into();
if let Some(id) = self.browser_context.id() {
if params.browser_context_id.is_none() {
params.browser_context_id = Some(id.clone());
}
}
self.sender
.clone()
.send(HandlerMessage::CreatePage(params, tx))
.await?;
rx.await?
}
/// Version information about the browser
pub async fn version(&self) -> Result<GetVersionReturns> {
Ok(self.execute(GetVersionParams::default()).await?.result)
}
/// Returns the user agent of the browser
pub async fn user_agent(&self) -> Result<String> {
Ok(self.version().await?.user_agent)
}
/// Call a browser method.
pub async fn execute<T: Command>(&self, cmd: T) -> Result<CommandResponse<T::Response>> {
let (tx, rx) = oneshot_channel();
let method = cmd.identifier();
let msg = CommandMessage::new(cmd, tx)?;
self.sender
.clone()
.send(HandlerMessage::Command(msg))
.await?;
let resp = rx.await??;
to_command_response::<T>(resp, method)
}
/// Return all of the pages of the browser
pub async fn pages(&self) -> Result<Vec<Page>> {
let (tx, rx) = oneshot_channel();
self.sender
.clone()
.send(HandlerMessage::GetPages(tx))
.await?;
Ok(rx.await?)
}
/// Return page of given target_id
pub async fn get_page(&self, target_id: TargetId) -> Result<Page> {
let (tx, rx) = oneshot_channel();
self.sender
.clone()
.send(HandlerMessage::GetPage(target_id, tx))
.await?;
rx.await?.ok_or(CdpError::NotFound)
}
//Set listener for browser event
pub async fn event_listener<T: IntoEventKind>(&self) -> Result<EventStream<T>> {
let (tx, rx) = unbounded();
self.sender
.clone()
.send(HandlerMessage::AddEventListener(
EventListenerRequest::new::<T>(tx),
))
.await?;
Ok(EventStream::new(rx))
}
}
impl Drop for Browser {
fn drop(&mut self) {
if let Some(child) = self.child.as_mut() {
child.kill().expect("!kill");
}
}
}
async fn ws_url_from_output(child_process: &mut Child) -> String {
let stdout = child_process.stderr.take().expect("no stderror");
fn read_debug_url(stdout: std::process::ChildStderr) -> String {
let mut buf = BufReader::new(stdout);
let mut line = String::new();
loop {
if buf.read_line(&mut line).is_ok() {
// check for ws in line
if let Some(ws) = line.rsplit("listening on ").next() {
if ws.starts_with("ws") && ws.contains("devtools/browser") {
return ws.trim().to_string();
}
}
} else {
line = String::new();
}
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "async-std-runtime")] {
async_std::task::spawn_blocking(|| read_debug_url(stdout)).await
} else if #[cfg(feature = "tokio-runtime")] {
tokio::task::spawn_blocking(move || read_debug_url(stdout)).await.expect("Failed to read debug url from process output")
}
}
}
#[derive(Debug, Clone)]
pub struct BrowserConfig {
/// Determines whether to run headless version of the browser. Defaults to
/// true.
headless: bool,
/// Determines whether to run the browser with a sandbox.
sandbox: bool,
/// Launch the browser with a specific window width and height.
window_size: Option<(u32, u32)>,
/// Launch the browser with a specific debugging port.
port: u16,
/// Path for Chrome or Chromium.
///
/// If unspecified, the create will try to automatically detect a suitable
/// binary.
executable: std::path::PathBuf,
/// A list of Chrome extensions to load.
///
/// An extension should be a path to a folder containing the extension code.
/// CRX files cannot be used directly and must be first extracted.
///
/// Note that Chrome does not support loading extensions in headless-mode.
/// See https://bugs.chromium.org/p/chromium/issues/detail?id=706008#c5
extensions: Vec<String>,
/// Environment variables to set for the Chromium process.
/// Passes value through to std::process::Command::envs.
pub process_envs: Option<HashMap<String, String>>,
/// Data dir for user data
pub user_data_dir: Option<PathBuf>,
/// Whether to launch the `Browser` in incognito mode
incognito: bool,
/// Ignore https errors, default is true
ignore_https_errors: bool,
viewport: Viewport,
/// The duration after a request with no response should time out
request_timeout: Duration,
/// Additional command line arguments to pass to the browser instance.
args: Vec<String>,
}
#[derive(Debug, Clone)]
pub struct BrowserConfigBuilder {
headless: bool,
sandbox: bool,
window_size: Option<(u32, u32)>,
port: u16,
executable: Option<PathBuf>,
extensions: Vec<String>,
process_envs: Option<HashMap<String, String>>,
user_data_dir: Option<PathBuf>,
incognito: bool,
ignore_https_errors: bool,
viewport: Viewport,
request_timeout: Duration,
args: Vec<String>,
}
impl BrowserConfig {
pub fn builder() -> BrowserConfigBuilder {
BrowserConfigBuilder::default()
}
pub fn with_executable(path: impl AsRef<Path>) -> Self {
Self::builder().chrome_executable(path).build().unwrap()
}
}
impl Default for BrowserConfigBuilder {
fn default() -> Self {
Self {
headless: true,
sandbox: true,
window_size: None,
port: 0,
executable: None,
extensions: Vec::new(),
process_envs: None,
user_data_dir: None,
incognito: false,
ignore_https_errors: true,
viewport: Default::default(),
request_timeout: Duration::from_millis(REQUEST_TIMEOUT),
args: Vec::new(),
}
}
}
impl BrowserConfigBuilder {
pub fn window_size(mut self, width: u32, height: u32) -> Self {
self.window_size = Some((width, height));
self
}
pub fn no_sandbox(mut self) -> Self {
self.sandbox = false;
self
}
pub fn with_head(mut self) -> Self {
self.headless = false;
self
}
pub fn incognito(mut self) -> Self {
self.incognito = true;
self
}
pub fn respect_https_errors(mut self) -> Self {
self.ignore_https_errors = false;
self
}
pub fn request_timeout(mut self, timeout: Duration) -> Self {
self.request_timeout = timeout;
self
}
pub fn viewport(mut self, viewport: Viewport) -> Self {
self.viewport = viewport;
self
}
pub fn user_data_dir(mut self, data_dir: impl AsRef<Path>) -> Self {
self.user_data_dir = Some(data_dir.as_ref().to_path_buf());
self
}
pub fn chrome_executable(mut self, path: impl AsRef<Path>) -> Self {
self.executable = Some(path.as_ref().to_path_buf());
self
}
pub fn extension(mut self, extension: impl Into<String>) -> Self {
self.extensions.push(extension.into());
self
}
pub fn extensions<I, S>(mut self, extensions: I) -> Self
where
I: IntoIterator<Item = S>,
S: Into<String>,
{
for ext in extensions {
self.extensions.push(ext.into());
}
self
}
pub fn env(mut self, key: impl Into<String>, val: impl Into<String>) -> Self {
self.process_envs
.get_or_insert(HashMap::new())
.insert(key.into(), val.into());
self
}
pub fn envs<I, K, V>(mut self, envs: I) -> Self
where
I: IntoIterator<Item = (K, V)>,
K: Into<String>,
V: Into<String>,
{
self.process_envs
.get_or_insert(HashMap::new())
.extend(envs.into_iter().map(|(k, v)| (k.into(), v.into())));
self
}
pub fn arg(mut self, arg: impl Into<String>) -> Self {
self.args.push(arg.into());
self
}
pub fn args<I, S>(mut self, args: I) -> Self
where
I: IntoIterator<Item = S>,
S: Into<String>,
{
for arg in args {
self.args.push(arg.into());
}
self
}
pub fn build(self) -> std::result::Result<BrowserConfig, String> {
let executable = if let Some(e) = self.executable {
e
} else {
default_executable()?
};
Ok(BrowserConfig {
headless: self.headless,
sandbox: self.sandbox,
window_size: self.window_size,
port: self.port,
executable,
extensions: self.extensions,
process_envs: self.process_envs,
user_data_dir: self.user_data_dir,
incognito: self.incognito,
ignore_https_errors: self.ignore_https_errors,
viewport: self.viewport,
request_timeout: self.request_timeout,
args: self.args,
})
}
}
impl BrowserConfig {
pub fn launch(&self) -> io::Result<Child> {
let dbg_port = format!("--remote-debugging-port={}", self.port);
let args = [
dbg_port.as_str(),
"--disable-background-networking",
"--enable-features=NetworkService,NetworkServiceInProcess",
"--disable-background-timer-throttling",
"--disable-backgrounding-occluded-windows",
"--disable-breakpad",
"--disable-client-side-phishing-detection",
"--disable-component-extensions-with-background-pages",
"--disable-default-apps",
"--disable-dev-shm-usage",
"--disable-extensions",
"--disable-features=TranslateUI",
"--disable-hang-monitor",
"--disable-ipc-flooding-protection",
"--disable-popup-blocking",
"--disable-prompt-on-repost",
"--disable-renderer-backgrounding",
"--disable-sync",
"--force-color-profile=srgb",
"--metrics-recording-only",
"--no-first-run",
"--enable-automation",
"--password-store=basic",
"--use-mock-keychain",
"--enable-blink-features=IdleDetection",
];
let mut cmd = process::Command::new(&self.executable);
cmd.args(&args).args(&DEFAULT_ARGS).args(&self.args).args(
self.extensions
.iter()
.map(|e| format!("--load-extension={}", e)),
);
if let Some(ref user_data) = self.user_data_dir {
cmd.arg(format!("--user-data-dir={}", user_data.display()));
}
if let Some((width, height)) = self.window_size {
cmd.arg(format!("--window-size={},{}", width, height));
}
if!self.sandbox {
cmd.args(&["--no-sandbox", "--disable-setuid-sandbox"]);
}
if self.headless {
cmd.args(&["--headless", "--hide-scrollbars", "--mute-audio"]);
}
if self.incognito {
cmd.arg("--incognito");
}
if let Some(ref envs) = self.process_envs {
cmd.envs(envs);
}
cmd.stderr(Stdio::piped()).spawn()
}
}
/// Returns the path to Chrome's executable.
///
/// If the `CHROME` environment variable is set, `default_executable` will
/// use it as the default path. Otherwise, the filenames `google-chrome-stable`
/// `chromium`, `chromium-browser`, `chrome` and `chrome-browser` are
/// searched for in standard places. If that fails,
/// `/Applications/Google Chrome.app/...` (on MacOS) or the registry (on
/// Windows) is consulted. If all of the above fail, an error is returned.
pub fn default_executable() -> Result<std::path::PathBuf, String> {
if let Ok(path) = std::env::var("CHROME") {
if std::path::Path::new(&path).exists() {
return Ok(path.into());
}
}
for app in &[
"google-chrome-stable",
"chromium",
"chromium-browser",
"chrome",
"chrome-browser",
] {
if let Ok(path) = which::which(app) {
return Ok(path);
}
}
#[cfg(target_os = "macos")]
{
let default_paths = &["/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"][..];
for path in default_paths {
if std::path::Path::new(path).exists() {
return Ok(path.into());
}
}
}
#[cfg(windows)]
{
if let Some(path) = get_chrome_path_from_windows_registry() {
if path.exists() {
return Ok(path);
}
}
}
Err("Could not auto detect a chrome executable".to_string())
}
#[cfg(windows)]
pub(crate) fn get_chrome_path_from_windows_registry() -> Option<std::path::PathBuf> {
winreg::RegKey::predef(winreg::enums::HKEY_LOCAL_MACHINE)
.open_subkey("SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\chrome.exe")
|
launch
|
identifier_name
|
main.rs
|
#![feature(plugin, decl_macro, custom_derive, type_ascription)] // Compiler plugins
#![plugin(rocket_codegen)] // rocket code generator
extern crate rocket;
extern crate rabe;
extern crate serde;
extern crate serde_json;
extern crate rustc_serialize;
extern crate blake2_rfc;
extern crate rocket_simpleauth;
extern crate rand;
#[macro_use] extern crate rocket_contrib;
#[macro_use] extern crate serde_derive;
#[macro_use] extern crate diesel;
use std::error::*;
use std::fs::*;
use std::sync::{Once, ONCE_INIT};
use rand::Rng;
use rand::os::OsRng;
use rocket_contrib::{Json};
use rocket::response::status::BadRequest;
use rocket::http::*;
use rocket::request::FromRequest;
use rocket::request::Request;
use rocket::outcome::Outcome;
use diesel::*;
use std::str;
use std::str::FromStr;
use std::env;
use rabe::schemes::bsw;
use blake2_rfc::blake2b::*;
pub mod schema;
// Change the alias to `Box<error::Error>`.
type BoxedResult<T> = std::result::Result<T, Box<Error>>;
enum SCHEMES {
bsw
}
impl FromStr for SCHEMES {
type Err = ();
fn from_str(s: &str) -> Result<SCHEMES, ()> {
match s {
"bsw" => Ok(SCHEMES::bsw),
_ => Err(()),
}
}
}
// ----------------------------------------------------
// Internal structs follow
// ----------------------------------------------------
struct ApiKey(String);
impl<'t, 'r> FromRequest<'t, 'r> for ApiKey {
type Error = ();
fn from_request(request: &'t Request<'r>) -> Outcome<ApiKey, (Status,()), ()> {
let keys: Vec<_> = request.headers().get("Authorization").collect();
if keys.len()!= 1 {
return Outcome::Failure((Status::BadRequest, ()));
}
println!("Got API key {}", keys[0]);
let key = keys[0];
if!is_valid(keys[0].to_string()) {
// return Outcome::Forward(());
return Outcome::Failure((Status::Unauthorized, ()));
}
return Outcome::Success(ApiKey(key.to_string()));
}
}
// -----------------------------------------------------
// Message formats follow
// -----------------------------------------------------
#[derive(Serialize, Deserialize)]
struct Message {
contents: String
}
#[derive(Serialize, Deserialize)]
struct SetupMsg {
scheme: String,
attributes: Vec<String>
}
#[derive(Serialize, Deserialize)]
struct KeyGenMsg {
attributes: Vec<String>,
scheme: String,
}
#[derive(Serialize, Deserialize)]
struct EncMessage {
plaintext :String,
policy : String, // A json serialized policy that is understood by the scheme assigned to the session
session_id : String // Session ID unique per (user,scheme)
}
#[derive(Serialize, Deserialize)]
struct DecMessage {
ct: String,
session_id: String, // Session ID unique per (user,scheme)
username: String,
password: String
}
#[derive(Serialize, Deserialize)]
struct ListAttrMsg {
username : String,
password : String
}
#[derive(Serialize, Deserialize)]
struct User {
username: String,
password: String,
attributes: Vec<String>,
random_session_id: String // Reference to session
}
// -----------------------------------------------------
// REST APIs follow
// -----------------------------------------------------
#[post(path="/encrypt", format="application/json", data="<d>")]
fn encrypt(d:Json<EncMessage>) -> Result<Json<String>, BadRequest<String>> {
// Get active session (panics if not available)
let conn = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &d.session_id).unwrap();
// Get key material needed for encryption
let key_material: Vec<String> = serde_json::from_str(&session.key_material.as_str()).unwrap();
let pk_string : &String = &key_material[0];
let plaintext: &Vec<u8> = &d.plaintext.as_bytes().to_vec();
let pk : bsw::CpAbePublicKey = serde_json::from_str(pk_string.as_str()).unwrap(); // TODO NotNice: need to convert to scheme-specific type here. Should be generic trait w/ function "KeyMaterial.get_public_key()"
println!("plaintext {:?}", plaintext);
println!("policy {:?}", &d.policy);
let res = bsw::encrypt(&pk, &d.policy, plaintext).unwrap();
Ok(Json(serde_json::to_string_pretty(&res).unwrap()))
}
#[post(path="/decrypt", format="application/json", data="<d>")]
fn decrypt(d:Json<DecMessage>) -> Result<Json<String>, BadRequest<String>> {
println!("Decryption demanded with ciphertext {}", &d.ct);
// Get session from DB and extract key material needed for decryption
let conn = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &d.session_id).unwrap();
let users: Vec<schema::User> = _db_get_users_by_apikey(&conn, &session.random_session_id);
println!("Users {:?}", users.len());
let user: schema::User = users.into_iter().take_while(|u| u.username.eq(&d.username)).next().unwrap();
let key_material: String = user.key_material;
match session.scheme.parse::<SCHEMES>() {
Ok(SCHEMES::bsw) => {
let ct: bsw::CpAbeCiphertext = serde_json::from_str(&d.ct).unwrap();
let sk: bsw::CpAbeSecretKey = serde_json::from_str(&key_material).unwrap();
// Decrypt ciphertext
let res = bsw::decrypt(&sk, &ct).unwrap();
let s = match str::from_utf8(&res) {
Ok(v) => v,
Err(e) => panic!("Invalid UTF-8 sequence: {}", e),
};
Ok(Json(s.to_string()))
},
Err(_) => Err(BadRequest(Some(format!("Unsupported scheme {} of session {}", session.scheme, session.random_session_id))))
}
}
#[post(path="/add_user", format="application/json", data="<d>")]
fn
|
(d:Json<User>) -> Result<(), BadRequest<String>> {
let ref username: String = d.username;
let ref passwd: String = d.password;
let ref random_session_id = d.random_session_id;
let salt: i32 = 1234; // TODO use random salt when storing hashed user passwords
println!("Adding user {} {} {} {}", &username, &passwd, salt, random_session_id);
// Create keys for the user
let conn = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &random_session_id).unwrap();
let scheme: String = session.scheme;
match scheme.parse::<SCHEMES>() {
Ok(SCHEMES::bsw) => {
let master_key_material: Vec<String> = serde_json::from_str(&session.key_material).unwrap();
let master_pk: bsw::CpAbePublicKey = serde_json::from_str(&master_key_material[0]).unwrap();
let master_mk: bsw::CpAbeMasterKey = serde_json::from_str(&master_key_material[1]).unwrap();
let user_sk: bsw::CpAbeSecretKey = bsw::keygen(&master_pk, &master_mk, &d.attributes).unwrap();
match db_add_user(&conn, &username, &passwd, salt, &d.attributes, &serde_json::to_string(&user_sk).unwrap(), random_session_id) {
Err(e) => {println!("Nope! {}", e); return Err(BadRequest(Some(format!("Failure adding userpk failure: {}", e))))},
Ok(_r) => return Ok(())
}
},
Err(_) => { return Err(BadRequest(Some(format!("Scheme {} not supported", scheme)))); }
}
}
#[post(path="/list_attrs", format="application/json", data="<d>")]
fn list_attrs(d:Json<ListAttrMsg>, key: ApiKey) -> Result<(String), BadRequest<String>> {
let conn: MysqlConnection = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &key.0).unwrap();
return Ok(session.key_material);
}
#[post(path="/setup", format="application/json", data="<d>")]
fn setup(d:Json<SetupMsg>) -> Result<(String), BadRequest<String>> {
let param: SetupMsg = d.into_inner();
let conn: MysqlConnection = db_connect();
let attributes: String = serde_json::to_string(¶m.attributes).unwrap();
// Setup of a new session. Create keys first
let key_gen_params = KeyGenMsg {
attributes: param.attributes,
scheme: "bsw".to_string()
};
println!("Creating key for {} attributes", key_gen_params.attributes.len());
let key_material: Vec<String> = match _keygen(key_gen_params) { // TODO NotNice: keygen returns a vector of strings. Instead it should return some Box<KeyMaterial> with functions like get_public_key() etc.
Ok(material) => material,
Err(e) => { return Err(BadRequest(Some(format!("Failure to create keys {}",e)))); }
};
// Write new session to database and return its id
let session = db_create_session(&conn, &String::from("bsw"), &key_material, &attributes);
return Ok(session.unwrap());
}
fn _keygen(param: KeyGenMsg) -> Result<Vec<String>, String> {
let scheme: String = param.scheme;
match scheme.parse::<SCHEMES>() {
Ok(SCHEMES::bsw) => {
// Generating mk
let (pk, mk): (bsw::CpAbePublicKey,bsw::CpAbeMasterKey) = bsw::setup();
let mut _attributes = param.attributes;
//Generating attribute keys
let res:bsw::CpAbeSecretKey = bsw::keygen(&pk, &mk, &_attributes).unwrap();
Ok(vec![serde_json::to_string(&pk).unwrap(),
serde_json::to_string(&mk).unwrap(),
serde_json::to_string(&res).unwrap()])
},
Err(e) => Err("Unsupported scheme".to_string())
}
}
// ------------------------------------------------------------
// Internal methods follow
// ------------------------------------------------------------
fn db_connect() -> MysqlConnection {
let database_url : String = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
MysqlConnection::establish(&database_url).expect(&format!("Error connecting to {}", database_url)) // TODO Replace MysqlConnection with more generic "Connection"?
}
/// Adds a user to database.
fn db_add_user(conn: &MysqlConnection, username: &String, passwd: &String, salt: i32, attributes: &Vec<String>, key_material: &String, random_session_id: &String) -> Result<usize, String> {
use schema::users;
use schema::sessions;
// Get primary key from sessions table
let session: schema::Session = match sessions::table.filter(sessions::random_session_id.eq(random_session_id)).first::<schema::Session>(conn) {
Ok(s) => s,
Err(_e) => {return Err(format!("No session with random id {} present. Cannot add user.", random_session_id)); }
};
match users::table
.filter(users::username.eq(username.to_string()))
.filter(users::session_id.eq(session.id))
.first::<schema::User>(conn) {
Ok(_u) => return Err("User already exists for this session".to_string()),
Err(_e) => {}
};
let user = schema::NewUser {
username: username.to_string(),
password: passwd.to_string(), // TODO store salted hash of pwd.
attributes: serde_json::to_string(attributes).unwrap(),
key_material: key_material.to_string(),
salt: salt,
session_id: session.id
};
match diesel::insert_into(users::table)
.values(&user)
.execute(conn) {
Ok(id) => Ok(id),
Err(_e) => Err("Could not insert user".to_string())
}
}
fn db_create_session(conn: &MysqlConnection, scheme: &String, key_material: &Vec<String>, attributes: &String) -> Result<String, String> {
use schema::sessions;
println!("Got scheme {}", scheme);
match scheme.parse::<SCHEMES>() {
Ok(_scheme) => {
let session_id: String = OsRng::new().unwrap().next_u64().to_string();
let session = schema::NewSession {
is_initialized: false,
scheme: scheme.to_string(),
random_session_id: session_id.clone(),
key_material: serde_json::to_string(key_material).unwrap(),
attributes: attributes.to_string()
};
println!("Key material is {}", session.key_material);
// Return auto-gen'd session id
match diesel::insert_into(sessions::table)
.values(&session)
.execute(conn) {
Ok(_usize) => Ok(session_id),
Err(_e) => Err("Could not insert into sessions".to_string())
}
}
Err(_) => Err("Invalid scheme".to_string())
}
}
fn db_get_session_by_api_key(conn: &MysqlConnection, api_key: &String) -> Result<schema::Session, diesel::result::Error> {
use schema::sessions;
sessions::table.filter(sessions::random_session_id.eq(api_key))
.first::<schema::Session>(conn)
}
fn _db_get_user_by_username<'a>(conn: &MysqlConnection, user: &'a String) -> Option<schema::User> {
use schema::users;
match users::table.filter(users::username.eq(user))
.first::<schema::User>(conn) {
Ok(u) => Some(u),
Err(_) => None
}
}
fn _db_get_users_by_apikey<'a>(conn: &MysqlConnection, api_key: &String) -> Vec<schema::User> {
use schema::sessions;
use schema::users;
users::table
.inner_join(sessions::table)
.filter(sessions::random_session_id.eq(api_key))
.get_results::<(schema::User, schema::Session)>(conn)
.expect("Could not load users by API key {}")
.into_iter().map(|(user, _session)| user).collect()
}
/// TODO Use to create salted hashed passwords
fn _to_db_passwd(plain_password: String, salt: i32) -> Blake2bResult {
let salted_pwd = plain_password + &salt.to_string();
let res = blake2b(64, &[], salted_pwd.as_bytes());
return res;
}
fn rocket() -> rocket::Rocket {
rocket::ignite().mount("/", routes![setup, list_attrs, encrypt, decrypt, add_user])
}
fn main() {
rocket().launch();
}
/// Returns true if `key` is a valid API key string.
fn is_valid(api_key: String) -> bool {
use schema::users;
use schema::sessions;
let k : String = match api_key.starts_with("Bearer ") {
true => api_key.replace("Bearer ", ""),
false => api_key
};
let conn = db_connect();
match users::table
.inner_join(sessions::table)
.filter(users::session_id.eq(sessions::id))
.filter(sessions::random_session_id.eq(k))
.count()
.get_result::<i64>(&conn) {
Ok(_) => return true,
Err(_e) => return false
}
}
// -----------------------------------------------
// Tests follow
// -----------------------------------------------
#[cfg(test)]
mod tests {
use super::rocket;
use rocket::local::Client;
use rocket::http::Status;
use super::*;
#[test]
fn test_db_user() {
let con = db_connect();
// make sure we have a session to test with
let session_id: String = db_create_session(&con, &String::from("bsw"), &vec!["".to_string()], &"".to_string()).unwrap();
// Write user into db
let user: String = "bla".to_string();
let passwd: String = "blubb".to_string();
let key_material: String = "".to_string();
let attributes: Vec<String>= vec!("".to_string());
let salt: i32 = 1234;
let result: usize = db_add_user(&con, &user, &passwd, salt, &attributes, &key_material, &session_id).unwrap();
assert!(result > 0);
// Check that it is there
let u: schema::User = _db_get_user_by_username(&con, &user).unwrap();
assert_eq!(u.username, user);
}
#[test]
fn test_db_session() {
let con = db_connect();
// Create a user
let user: String = "bla".to_string();
let passwd: String = "blubb".to_string();
let salt: i32 = 1234;
// Setup of a new session. Create keys
let key_gen_parms = KeyGenMsg {
attributes: vec!["attribute_1".to_string(), "attribute_2".to_string()],
scheme: "bsw".to_string()
};
let key_material: Vec<String> = _keygen(key_gen_parms).unwrap();
let attributes: Vec<String> = vec!(String::from(""));
let scheme: String = "bsw".to_string();
let session_id: String = db_create_session(&con, &scheme, &key_material, &serde_json::to_string(&attributes).unwrap()).expect("Could not create session");
println!("Got session id {}", session_id);
db_add_user(&con, &user, &passwd, salt, &attributes, &serde_json::to_string(&key_material).unwrap(), &session_id).expect("Failure adding user");
}
#[test]
fn test_setup() {
let client = Client::new(rocket()).expect("valid rocket instance");
println!("Have rocket");
// Set up scheme
let setup_msg: SetupMsg = SetupMsg {
scheme: "bsw".to_string(),
attributes: vec!("attribute_1".to_string(), "attribute_2".to_string())
};
let mut response = client.post("/setup")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&setup_msg)).expect("Setting up bsw"))
.dispatch();
assert_eq!(response.status(), Status::Ok);
let session_id: String = response.body_string().unwrap();
println!("SETUP RETURNED {}", &session_id);
// Create user
let user = User {
random_session_id: session_id,
username : String::from("admin"),
password : String::from("admin"),
attributes: vec!("attribute_1".to_string())
};
let response_add: rocket::local::LocalResponse = client.post("/add_user")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&user)).expect("Attribute serialization"))
.dispatch();
//assert_eq!(response_add.status(), Status::Ok);
}
#[test]
fn test_encrypt_decrypt() {
let client = Client::new(rocket()).expect("valid rocket instance");
// Set up scheme
let setup_msg: SetupMsg = SetupMsg {
scheme: "bsw".to_string(),
attributes: vec!("attribute_1".to_string(), "attribute_2".to_string())
};
let mut response = client.post("/setup")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&setup_msg)).expect("Setting up bsw"))
.dispatch();
assert_eq!(response.status(), Status::Ok);
let session_id : &String = &response.body_string().unwrap();
println!("Setup returned SessionID {}",session_id);
// Create user
let user = User {
random_session_id: session_id.to_string(),
username : String::from("admin"),
password : String::from("admin"),
attributes: vec!("attribute_1".to_string())
};
let _response_add = client.post("/add_user")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&user)).expect("Attribute serialization"))
.dispatch();
// Encrypt some text for a policy
let policy:String = String::from(r#"{"OR": [{"ATT": "attribute_1"}, {"ATT": "attribute_2"}]}"#);
let msg : EncMessage = EncMessage {
plaintext : "Encrypt me".into(),
policy : policy,
session_id : session_id.clone()
};
let mut resp_enc = client.post("/encrypt")
.header(ContentType::JSON)
.body(serde_json::to_string(&msg).expect("Encryption"))
.dispatch();
assert_eq!(resp_enc.status(), Status::Ok);
let ct:String = resp_enc.body_string().unwrap();
let ct_json = serde_json::from_str(&ct).unwrap();
// Decrypt again
let c : DecMessage = DecMessage {
ct: ct_json,
session_id: session_id.clone(),
username: String::from("admin"),
password: String::from("admin")
};
let mut resp_dec = client.post("/decrypt")
.header(ContentType::JSON)
.body(serde_json::to_string(&c).unwrap())
.dispatch();
let pt_hex: String = resp_dec.body_string().unwrap();
println!("HEX: {}", pt_hex);
let mut pt: String = serde_json::from_str(&pt_hex).expect("From json");
pt = pt.trim().to_string();
println!("RESULT: {}", pt);
assert_eq!(pt, "Encrypt me");
}
}
|
add_user
|
identifier_name
|
main.rs
|
#![feature(plugin, decl_macro, custom_derive, type_ascription)] // Compiler plugins
#![plugin(rocket_codegen)] // rocket code generator
extern crate rocket;
extern crate rabe;
extern crate serde;
extern crate serde_json;
extern crate rustc_serialize;
extern crate blake2_rfc;
extern crate rocket_simpleauth;
extern crate rand;
#[macro_use] extern crate rocket_contrib;
#[macro_use] extern crate serde_derive;
#[macro_use] extern crate diesel;
use std::error::*;
use std::fs::*;
use std::sync::{Once, ONCE_INIT};
use rand::Rng;
use rand::os::OsRng;
use rocket_contrib::{Json};
use rocket::response::status::BadRequest;
use rocket::http::*;
use rocket::request::FromRequest;
use rocket::request::Request;
use rocket::outcome::Outcome;
use diesel::*;
use std::str;
use std::str::FromStr;
use std::env;
use rabe::schemes::bsw;
use blake2_rfc::blake2b::*;
pub mod schema;
// Change the alias to `Box<error::Error>`.
type BoxedResult<T> = std::result::Result<T, Box<Error>>;
enum SCHEMES {
bsw
}
impl FromStr for SCHEMES {
type Err = ();
fn from_str(s: &str) -> Result<SCHEMES, ()> {
match s {
"bsw" => Ok(SCHEMES::bsw),
_ => Err(()),
}
}
}
// ----------------------------------------------------
// Internal structs follow
// ----------------------------------------------------
struct ApiKey(String);
impl<'t, 'r> FromRequest<'t, 'r> for ApiKey {
type Error = ();
fn from_request(request: &'t Request<'r>) -> Outcome<ApiKey, (Status,()), ()> {
let keys: Vec<_> = request.headers().get("Authorization").collect();
if keys.len()!= 1 {
return Outcome::Failure((Status::BadRequest, ()));
}
println!("Got API key {}", keys[0]);
let key = keys[0];
if!is_valid(keys[0].to_string()) {
// return Outcome::Forward(());
return Outcome::Failure((Status::Unauthorized, ()));
}
return Outcome::Success(ApiKey(key.to_string()));
}
}
// -----------------------------------------------------
// Message formats follow
// -----------------------------------------------------
#[derive(Serialize, Deserialize)]
struct Message {
contents: String
}
#[derive(Serialize, Deserialize)]
struct SetupMsg {
scheme: String,
attributes: Vec<String>
}
#[derive(Serialize, Deserialize)]
struct KeyGenMsg {
attributes: Vec<String>,
scheme: String,
}
#[derive(Serialize, Deserialize)]
struct EncMessage {
plaintext :String,
policy : String, // A json serialized policy that is understood by the scheme assigned to the session
session_id : String // Session ID unique per (user,scheme)
}
#[derive(Serialize, Deserialize)]
struct DecMessage {
ct: String,
session_id: String, // Session ID unique per (user,scheme)
username: String,
password: String
}
#[derive(Serialize, Deserialize)]
struct ListAttrMsg {
username : String,
password : String
}
#[derive(Serialize, Deserialize)]
struct User {
username: String,
password: String,
attributes: Vec<String>,
random_session_id: String // Reference to session
}
// -----------------------------------------------------
// REST APIs follow
// -----------------------------------------------------
#[post(path="/encrypt", format="application/json", data="<d>")]
fn encrypt(d:Json<EncMessage>) -> Result<Json<String>, BadRequest<String>> {
// Get active session (panics if not available)
let conn = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &d.session_id).unwrap();
// Get key material needed for encryption
let key_material: Vec<String> = serde_json::from_str(&session.key_material.as_str()).unwrap();
let pk_string : &String = &key_material[0];
let plaintext: &Vec<u8> = &d.plaintext.as_bytes().to_vec();
let pk : bsw::CpAbePublicKey = serde_json::from_str(pk_string.as_str()).unwrap(); // TODO NotNice: need to convert to scheme-specific type here. Should be generic trait w/ function "KeyMaterial.get_public_key()"
println!("plaintext {:?}", plaintext);
println!("policy {:?}", &d.policy);
let res = bsw::encrypt(&pk, &d.policy, plaintext).unwrap();
Ok(Json(serde_json::to_string_pretty(&res).unwrap()))
}
#[post(path="/decrypt", format="application/json", data="<d>")]
fn decrypt(d:Json<DecMessage>) -> Result<Json<String>, BadRequest<String>> {
println!("Decryption demanded with ciphertext {}", &d.ct);
// Get session from DB and extract key material needed for decryption
let conn = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &d.session_id).unwrap();
let users: Vec<schema::User> = _db_get_users_by_apikey(&conn, &session.random_session_id);
println!("Users {:?}", users.len());
let user: schema::User = users.into_iter().take_while(|u| u.username.eq(&d.username)).next().unwrap();
let key_material: String = user.key_material;
match session.scheme.parse::<SCHEMES>() {
Ok(SCHEMES::bsw) => {
let ct: bsw::CpAbeCiphertext = serde_json::from_str(&d.ct).unwrap();
let sk: bsw::CpAbeSecretKey = serde_json::from_str(&key_material).unwrap();
// Decrypt ciphertext
let res = bsw::decrypt(&sk, &ct).unwrap();
let s = match str::from_utf8(&res) {
Ok(v) => v,
Err(e) => panic!("Invalid UTF-8 sequence: {}", e),
};
Ok(Json(s.to_string()))
},
Err(_) => Err(BadRequest(Some(format!("Unsupported scheme {} of session {}", session.scheme, session.random_session_id))))
}
}
#[post(path="/add_user", format="application/json", data="<d>")]
fn add_user(d:Json<User>) -> Result<(), BadRequest<String>> {
let ref username: String = d.username;
let ref passwd: String = d.password;
let ref random_session_id = d.random_session_id;
let salt: i32 = 1234; // TODO use random salt when storing hashed user passwords
println!("Adding user {} {} {} {}", &username, &passwd, salt, random_session_id);
// Create keys for the user
let conn = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &random_session_id).unwrap();
let scheme: String = session.scheme;
match scheme.parse::<SCHEMES>() {
Ok(SCHEMES::bsw) => {
let master_key_material: Vec<String> = serde_json::from_str(&session.key_material).unwrap();
let master_pk: bsw::CpAbePublicKey = serde_json::from_str(&master_key_material[0]).unwrap();
let master_mk: bsw::CpAbeMasterKey = serde_json::from_str(&master_key_material[1]).unwrap();
let user_sk: bsw::CpAbeSecretKey = bsw::keygen(&master_pk, &master_mk, &d.attributes).unwrap();
match db_add_user(&conn, &username, &passwd, salt, &d.attributes, &serde_json::to_string(&user_sk).unwrap(), random_session_id) {
Err(e) => {println!("Nope! {}", e); return Err(BadRequest(Some(format!("Failure adding userpk failure: {}", e))))},
Ok(_r) => return Ok(())
}
},
Err(_) => { return Err(BadRequest(Some(format!("Scheme {} not supported", scheme)))); }
}
}
#[post(path="/list_attrs", format="application/json", data="<d>")]
fn list_attrs(d:Json<ListAttrMsg>, key: ApiKey) -> Result<(String), BadRequest<String>> {
let conn: MysqlConnection = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &key.0).unwrap();
return Ok(session.key_material);
}
#[post(path="/setup", format="application/json", data="<d>")]
fn setup(d:Json<SetupMsg>) -> Result<(String), BadRequest<String>> {
let param: SetupMsg = d.into_inner();
let conn: MysqlConnection = db_connect();
let attributes: String = serde_json::to_string(¶m.attributes).unwrap();
// Setup of a new session. Create keys first
let key_gen_params = KeyGenMsg {
attributes: param.attributes,
scheme: "bsw".to_string()
};
println!("Creating key for {} attributes", key_gen_params.attributes.len());
let key_material: Vec<String> = match _keygen(key_gen_params) { // TODO NotNice: keygen returns a vector of strings. Instead it should return some Box<KeyMaterial> with functions like get_public_key() etc.
Ok(material) => material,
Err(e) => { return Err(BadRequest(Some(format!("Failure to create keys {}",e)))); }
};
// Write new session to database and return its id
let session = db_create_session(&conn, &String::from("bsw"), &key_material, &attributes);
return Ok(session.unwrap());
}
fn _keygen(param: KeyGenMsg) -> Result<Vec<String>, String> {
let scheme: String = param.scheme;
match scheme.parse::<SCHEMES>() {
Ok(SCHEMES::bsw) => {
// Generating mk
let (pk, mk): (bsw::CpAbePublicKey,bsw::CpAbeMasterKey) = bsw::setup();
let mut _attributes = param.attributes;
//Generating attribute keys
let res:bsw::CpAbeSecretKey = bsw::keygen(&pk, &mk, &_attributes).unwrap();
Ok(vec![serde_json::to_string(&pk).unwrap(),
serde_json::to_string(&mk).unwrap(),
serde_json::to_string(&res).unwrap()])
},
Err(e) => Err("Unsupported scheme".to_string())
}
}
// ------------------------------------------------------------
// Internal methods follow
// ------------------------------------------------------------
fn db_connect() -> MysqlConnection {
let database_url : String = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
MysqlConnection::establish(&database_url).expect(&format!("Error connecting to {}", database_url)) // TODO Replace MysqlConnection with more generic "Connection"?
}
/// Adds a user to database.
fn db_add_user(conn: &MysqlConnection, username: &String, passwd: &String, salt: i32, attributes: &Vec<String>, key_material: &String, random_session_id: &String) -> Result<usize, String> {
use schema::users;
use schema::sessions;
// Get primary key from sessions table
let session: schema::Session = match sessions::table.filter(sessions::random_session_id.eq(random_session_id)).first::<schema::Session>(conn) {
Ok(s) => s,
Err(_e) => {return Err(format!("No session with random id {} present. Cannot add user.", random_session_id)); }
};
match users::table
.filter(users::username.eq(username.to_string()))
.filter(users::session_id.eq(session.id))
.first::<schema::User>(conn) {
Ok(_u) => return Err("User already exists for this session".to_string()),
Err(_e) => {}
};
let user = schema::NewUser {
username: username.to_string(),
password: passwd.to_string(), // TODO store salted hash of pwd.
attributes: serde_json::to_string(attributes).unwrap(),
key_material: key_material.to_string(),
salt: salt,
session_id: session.id
};
match diesel::insert_into(users::table)
.values(&user)
.execute(conn) {
Ok(id) => Ok(id),
Err(_e) => Err("Could not insert user".to_string())
}
}
fn db_create_session(conn: &MysqlConnection, scheme: &String, key_material: &Vec<String>, attributes: &String) -> Result<String, String>
|
Ok(_usize) => Ok(session_id),
Err(_e) => Err("Could not insert into sessions".to_string())
}
}
Err(_) => Err("Invalid scheme".to_string())
}
}
fn db_get_session_by_api_key(conn: &MysqlConnection, api_key: &String) -> Result<schema::Session, diesel::result::Error> {
use schema::sessions;
sessions::table.filter(sessions::random_session_id.eq(api_key))
.first::<schema::Session>(conn)
}
fn _db_get_user_by_username<'a>(conn: &MysqlConnection, user: &'a String) -> Option<schema::User> {
use schema::users;
match users::table.filter(users::username.eq(user))
.first::<schema::User>(conn) {
Ok(u) => Some(u),
Err(_) => None
}
}
fn _db_get_users_by_apikey<'a>(conn: &MysqlConnection, api_key: &String) -> Vec<schema::User> {
use schema::sessions;
use schema::users;
users::table
.inner_join(sessions::table)
.filter(sessions::random_session_id.eq(api_key))
.get_results::<(schema::User, schema::Session)>(conn)
.expect("Could not load users by API key {}")
.into_iter().map(|(user, _session)| user).collect()
}
/// TODO Use to create salted hashed passwords
fn _to_db_passwd(plain_password: String, salt: i32) -> Blake2bResult {
let salted_pwd = plain_password + &salt.to_string();
let res = blake2b(64, &[], salted_pwd.as_bytes());
return res;
}
fn rocket() -> rocket::Rocket {
rocket::ignite().mount("/", routes![setup, list_attrs, encrypt, decrypt, add_user])
}
fn main() {
rocket().launch();
}
/// Returns true if `key` is a valid API key string.
fn is_valid(api_key: String) -> bool {
use schema::users;
use schema::sessions;
let k : String = match api_key.starts_with("Bearer ") {
true => api_key.replace("Bearer ", ""),
false => api_key
};
let conn = db_connect();
match users::table
.inner_join(sessions::table)
.filter(users::session_id.eq(sessions::id))
.filter(sessions::random_session_id.eq(k))
.count()
.get_result::<i64>(&conn) {
Ok(_) => return true,
Err(_e) => return false
}
}
// -----------------------------------------------
// Tests follow
// -----------------------------------------------
#[cfg(test)]
mod tests {
use super::rocket;
use rocket::local::Client;
use rocket::http::Status;
use super::*;
#[test]
fn test_db_user() {
let con = db_connect();
// make sure we have a session to test with
let session_id: String = db_create_session(&con, &String::from("bsw"), &vec!["".to_string()], &"".to_string()).unwrap();
// Write user into db
let user: String = "bla".to_string();
let passwd: String = "blubb".to_string();
let key_material: String = "".to_string();
let attributes: Vec<String>= vec!("".to_string());
let salt: i32 = 1234;
let result: usize = db_add_user(&con, &user, &passwd, salt, &attributes, &key_material, &session_id).unwrap();
assert!(result > 0);
// Check that it is there
let u: schema::User = _db_get_user_by_username(&con, &user).unwrap();
assert_eq!(u.username, user);
}
#[test]
fn test_db_session() {
let con = db_connect();
// Create a user
let user: String = "bla".to_string();
let passwd: String = "blubb".to_string();
let salt: i32 = 1234;
// Setup of a new session. Create keys
let key_gen_parms = KeyGenMsg {
attributes: vec!["attribute_1".to_string(), "attribute_2".to_string()],
scheme: "bsw".to_string()
};
let key_material: Vec<String> = _keygen(key_gen_parms).unwrap();
let attributes: Vec<String> = vec!(String::from(""));
let scheme: String = "bsw".to_string();
let session_id: String = db_create_session(&con, &scheme, &key_material, &serde_json::to_string(&attributes).unwrap()).expect("Could not create session");
println!("Got session id {}", session_id);
db_add_user(&con, &user, &passwd, salt, &attributes, &serde_json::to_string(&key_material).unwrap(), &session_id).expect("Failure adding user");
}
#[test]
fn test_setup() {
let client = Client::new(rocket()).expect("valid rocket instance");
println!("Have rocket");
// Set up scheme
let setup_msg: SetupMsg = SetupMsg {
scheme: "bsw".to_string(),
attributes: vec!("attribute_1".to_string(), "attribute_2".to_string())
};
let mut response = client.post("/setup")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&setup_msg)).expect("Setting up bsw"))
.dispatch();
assert_eq!(response.status(), Status::Ok);
let session_id: String = response.body_string().unwrap();
println!("SETUP RETURNED {}", &session_id);
// Create user
let user = User {
random_session_id: session_id,
username : String::from("admin"),
password : String::from("admin"),
attributes: vec!("attribute_1".to_string())
};
let response_add: rocket::local::LocalResponse = client.post("/add_user")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&user)).expect("Attribute serialization"))
.dispatch();
//assert_eq!(response_add.status(), Status::Ok);
}
#[test]
fn test_encrypt_decrypt() {
let client = Client::new(rocket()).expect("valid rocket instance");
// Set up scheme
let setup_msg: SetupMsg = SetupMsg {
scheme: "bsw".to_string(),
attributes: vec!("attribute_1".to_string(), "attribute_2".to_string())
};
let mut response = client.post("/setup")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&setup_msg)).expect("Setting up bsw"))
.dispatch();
assert_eq!(response.status(), Status::Ok);
let session_id : &String = &response.body_string().unwrap();
println!("Setup returned SessionID {}",session_id);
// Create user
let user = User {
random_session_id: session_id.to_string(),
username : String::from("admin"),
password : String::from("admin"),
attributes: vec!("attribute_1".to_string())
};
let _response_add = client.post("/add_user")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&user)).expect("Attribute serialization"))
.dispatch();
// Encrypt some text for a policy
let policy:String = String::from(r#"{"OR": [{"ATT": "attribute_1"}, {"ATT": "attribute_2"}]}"#);
let msg : EncMessage = EncMessage {
plaintext : "Encrypt me".into(),
policy : policy,
session_id : session_id.clone()
};
let mut resp_enc = client.post("/encrypt")
.header(ContentType::JSON)
.body(serde_json::to_string(&msg).expect("Encryption"))
.dispatch();
assert_eq!(resp_enc.status(), Status::Ok);
let ct:String = resp_enc.body_string().unwrap();
let ct_json = serde_json::from_str(&ct).unwrap();
// Decrypt again
let c : DecMessage = DecMessage {
ct: ct_json,
session_id: session_id.clone(),
username: String::from("admin"),
password: String::from("admin")
};
let mut resp_dec = client.post("/decrypt")
.header(ContentType::JSON)
.body(serde_json::to_string(&c).unwrap())
.dispatch();
let pt_hex: String = resp_dec.body_string().unwrap();
println!("HEX: {}", pt_hex);
let mut pt: String = serde_json::from_str(&pt_hex).expect("From json");
pt = pt.trim().to_string();
println!("RESULT: {}", pt);
assert_eq!(pt, "Encrypt me");
}
}
|
{
use schema::sessions;
println!("Got scheme {}", scheme);
match scheme.parse::<SCHEMES>() {
Ok(_scheme) => {
let session_id: String = OsRng::new().unwrap().next_u64().to_string();
let session = schema::NewSession {
is_initialized: false,
scheme: scheme.to_string(),
random_session_id: session_id.clone(),
key_material: serde_json::to_string(key_material).unwrap(),
attributes: attributes.to_string()
};
println!("Key material is {}", session.key_material);
// Return auto-gen'd session id
match diesel::insert_into(sessions::table)
.values(&session)
.execute(conn) {
|
identifier_body
|
main.rs
|
#![feature(plugin, decl_macro, custom_derive, type_ascription)] // Compiler plugins
#![plugin(rocket_codegen)] // rocket code generator
extern crate rocket;
extern crate rabe;
extern crate serde;
extern crate serde_json;
extern crate rustc_serialize;
extern crate blake2_rfc;
extern crate rocket_simpleauth;
extern crate rand;
#[macro_use] extern crate rocket_contrib;
#[macro_use] extern crate serde_derive;
#[macro_use] extern crate diesel;
use std::error::*;
use std::fs::*;
use std::sync::{Once, ONCE_INIT};
use rand::Rng;
use rand::os::OsRng;
use rocket_contrib::{Json};
use rocket::response::status::BadRequest;
use rocket::http::*;
use rocket::request::FromRequest;
use rocket::request::Request;
use rocket::outcome::Outcome;
use diesel::*;
use std::str;
use std::str::FromStr;
use std::env;
use rabe::schemes::bsw;
use blake2_rfc::blake2b::*;
pub mod schema;
// Change the alias to `Box<error::Error>`.
type BoxedResult<T> = std::result::Result<T, Box<Error>>;
enum SCHEMES {
bsw
}
impl FromStr for SCHEMES {
type Err = ();
fn from_str(s: &str) -> Result<SCHEMES, ()> {
match s {
"bsw" => Ok(SCHEMES::bsw),
_ => Err(()),
}
}
}
// ----------------------------------------------------
// Internal structs follow
// ----------------------------------------------------
struct ApiKey(String);
impl<'t, 'r> FromRequest<'t, 'r> for ApiKey {
type Error = ();
fn from_request(request: &'t Request<'r>) -> Outcome<ApiKey, (Status,()), ()> {
let keys: Vec<_> = request.headers().get("Authorization").collect();
if keys.len()!= 1 {
return Outcome::Failure((Status::BadRequest, ()));
}
println!("Got API key {}", keys[0]);
let key = keys[0];
if!is_valid(keys[0].to_string()) {
// return Outcome::Forward(());
|
}
// -----------------------------------------------------
// Message formats follow
// -----------------------------------------------------
#[derive(Serialize, Deserialize)]
struct Message {
contents: String
}
#[derive(Serialize, Deserialize)]
struct SetupMsg {
scheme: String,
attributes: Vec<String>
}
#[derive(Serialize, Deserialize)]
struct KeyGenMsg {
attributes: Vec<String>,
scheme: String,
}
#[derive(Serialize, Deserialize)]
struct EncMessage {
plaintext :String,
policy : String, // A json serialized policy that is understood by the scheme assigned to the session
session_id : String // Session ID unique per (user,scheme)
}
#[derive(Serialize, Deserialize)]
struct DecMessage {
ct: String,
session_id: String, // Session ID unique per (user,scheme)
username: String,
password: String
}
#[derive(Serialize, Deserialize)]
struct ListAttrMsg {
username : String,
password : String
}
#[derive(Serialize, Deserialize)]
struct User {
username: String,
password: String,
attributes: Vec<String>,
random_session_id: String // Reference to session
}
// -----------------------------------------------------
// REST APIs follow
// -----------------------------------------------------
/// POST /encrypt — encrypt `plaintext` under `policy` with the public key of
/// the session identified by `session_id`. Returns the ciphertext as pretty
/// JSON.
///
/// NOTE(review): every lookup/parse uses `unwrap()`, so an unknown session or
/// malformed key material panics the worker instead of yielding a 4xx.
#[post(path="/encrypt", format="application/json", data="<d>")]
fn encrypt(d:Json<EncMessage>) -> Result<Json<String>, BadRequest<String>> {
    // Get active session (panics if not available)
    let conn = db_connect();
    let session: schema::Session = db_get_session_by_api_key(&conn, &d.session_id).unwrap();
    // Key material layout (see _keygen): [0] public key, [1] master key, [2] secret key.
    let key_material: Vec<String> = serde_json::from_str(&session.key_material.as_str()).unwrap();
    let pk_string : &String = &key_material[0];
    let plaintext: &Vec<u8> = &d.plaintext.as_bytes().to_vec();
    let pk : bsw::CpAbePublicKey = serde_json::from_str(pk_string.as_str()).unwrap(); // TODO NotNice: need to convert to scheme-specific type here. Should be generic trait w/ function "KeyMaterial.get_public_key()"
    println!("plaintext {:?}", plaintext);
    println!("policy {:?}", &d.policy);
    let res = bsw::encrypt(&pk, &d.policy, plaintext).unwrap();
    Ok(Json(serde_json::to_string_pretty(&res).unwrap()))
}
/// POST /decrypt — decrypt `ct` with the secret key of `username` within the
/// session identified by `session_id`; returns the UTF-8 plaintext.
///
/// NOTE(review): `password` is accepted but never verified — TODO confirm
/// whether authentication is intended here.
#[post(path="/decrypt", format="application/json", data="<d>")]
fn decrypt(d:Json<DecMessage>) -> Result<Json<String>, BadRequest<String>> {
    println!("Decryption demanded with ciphertext {}", &d.ct);
    // Get session from DB and extract key material needed for decryption
    let conn = db_connect();
    let session: schema::Session = db_get_session_by_api_key(&conn, &d.session_id).unwrap();
    let users: Vec<schema::User> = _db_get_users_by_apikey(&conn, &session.random_session_id);
    println!("Users {:?}", users.len());
    // BUGFIX: the original `take_while(..).next()` stopped at the FIRST
    // non-matching row, so the user was only found when it happened to be
    // first in the result set. `find` scans the whole list. Also reply with
    // 400 instead of panicking when the user is not part of the session.
    let user: schema::User = match users.into_iter().find(|u| u.username == d.username) {
        Some(u) => u,
        None => return Err(BadRequest(Some(format!("No user {} in session {}", d.username, d.session_id)))),
    };
    let key_material: String = user.key_material;
    match session.scheme.parse::<SCHEMES>() {
        Ok(SCHEMES::bsw) => {
            let ct: bsw::CpAbeCiphertext = serde_json::from_str(&d.ct).unwrap();
            let sk: bsw::CpAbeSecretKey = serde_json::from_str(&key_material).unwrap();
            // Decrypt ciphertext
            let res = bsw::decrypt(&sk, &ct).unwrap();
            // The API contract is UTF-8 text; non-text plaintext is a caller bug.
            let s = match str::from_utf8(&res) {
                Ok(v) => v,
                Err(e) => panic!("Invalid UTF-8 sequence: {}", e),
            };
            Ok(Json(s.to_string()))
        },
        Err(_) => Err(BadRequest(Some(format!("Unsupported scheme {} of session {}", session.scheme, session.random_session_id))))
    }
}
/// POST /add_user — create a user inside an existing session and derive a
/// per-user ABE secret key from the session's master key material.
#[post(path="/add_user", format="application/json", data="<d>")]
fn add_user(d:Json<User>) -> Result<(), BadRequest<String>> {
    let ref username: String = d.username;
    let ref passwd: String = d.password;
    let ref random_session_id = d.random_session_id;
    let salt: i32 = 1234; // TODO use random salt when storing hashed user passwords
    println!("Adding user {} {} {} {}", &username, &passwd, salt, random_session_id);
    // Create keys for the user
    let conn = db_connect();
    // NOTE(review): unwrap panics on unknown session id — TODO return 400 instead.
    let session: schema::Session = db_get_session_by_api_key(&conn, &random_session_id).unwrap();
    let scheme: String = session.scheme;
    match scheme.parse::<SCHEMES>() {
        Ok(SCHEMES::bsw) => {
            // Key material layout (see _keygen): [0] public key, [1] master key.
            let master_key_material: Vec<String> = serde_json::from_str(&session.key_material).unwrap();
            let master_pk: bsw::CpAbePublicKey = serde_json::from_str(&master_key_material[0]).unwrap();
            let master_mk: bsw::CpAbeMasterKey = serde_json::from_str(&master_key_material[1]).unwrap();
            // Derive the user's secret key, restricted to the requested attributes.
            let user_sk: bsw::CpAbeSecretKey = bsw::keygen(&master_pk, &master_mk, &d.attributes).unwrap();
            match db_add_user(&conn, &username, &passwd, salt, &d.attributes, &serde_json::to_string(&user_sk).unwrap(), random_session_id) {
                Err(e) => {println!("Nope! {}", e); return Err(BadRequest(Some(format!("Failure adding userpk failure: {}", e))))},
                Ok(_r) => return Ok(())
            }
        },
        Err(_) => { return Err(BadRequest(Some(format!("Scheme {} not supported", scheme)))); }
    }
}
/// POST /list_attrs — return the raw key material of the session identified
/// by the `Authorization` header (the `ApiKey` request guard).
///
/// NOTE(review): `d` (username/password) is never checked, and the returned
/// key material includes the master key (see _keygen's layout) — confirm it
/// is intended to expose this to any holder of the API key.
#[post(path="/list_attrs", format="application/json", data="<d>")]
fn list_attrs(d:Json<ListAttrMsg>, key: ApiKey) -> Result<(String), BadRequest<String>> {
    let conn: MysqlConnection = db_connect();
    let session: schema::Session = db_get_session_by_api_key(&conn, &key.0).unwrap();
    return Ok(session.key_material);
}
/// POST /setup — create a new session: generate bsw key material for the
/// given attribute universe and persist the session row. Returns the random
/// session id, which also serves as the API key.
///
/// NOTE(review): `param.scheme` is ignored — "bsw" is hard-coded below in
/// both the keygen request and the stored session; confirm intended.
#[post(path="/setup", format="application/json", data="<d>")]
fn setup(d:Json<SetupMsg>) -> Result<(String), BadRequest<String>> {
    let param: SetupMsg = d.into_inner();
    let conn: MysqlConnection = db_connect();
    let attributes: String = serde_json::to_string(&param.attributes).unwrap();
    // Setup of a new session. Create keys first
    let key_gen_params = KeyGenMsg {
        attributes: param.attributes,
        scheme: "bsw".to_string()
    };
    println!("Creating key for {} attributes", key_gen_params.attributes.len());
    let key_material: Vec<String> = match _keygen(key_gen_params) { // TODO NotNice: keygen returns a vector of strings. Instead it should return some Box<KeyMaterial> with functions like get_public_key() etc.
        Ok(material) => material,
        Err(e) => { return Err(BadRequest(Some(format!("Failure to create keys {}",e)))); }
    };
    // Write new session to database and return its id
    let session = db_create_session(&conn, &String::from("bsw"), &key_material, &attributes);
    return Ok(session.unwrap());
}
/// Create key material for `param.scheme`.
///
/// Returns `[public_key, master_key, secret_key]`, each serialized as JSON
/// (this ordering is relied upon by `encrypt` and `add_user`). Errors with a
/// message for unsupported schemes.
fn _keygen(param: KeyGenMsg) -> Result<Vec<String>, String> {
    match param.scheme.parse::<SCHEMES>() {
        Ok(SCHEMES::bsw) => {
            // Generating mk
            let (pk, mk): (bsw::CpAbePublicKey, bsw::CpAbeMasterKey) = bsw::setup();
            // Generating attribute keys
            let sk: bsw::CpAbeSecretKey = bsw::keygen(&pk, &mk, &param.attributes).unwrap();
            Ok(vec![
                serde_json::to_string(&pk).unwrap(),
                serde_json::to_string(&mk).unwrap(),
                serde_json::to_string(&sk).unwrap(),
            ])
        },
        // FIX: previously bound an unused `e` (compiler warning); the message
        // is canned, so discard the parse error. Also dropped the needless
        // `let scheme` copy and `mut _attributes` rebinding.
        Err(_) => Err("Unsupported scheme".to_string()),
    }
}
// ------------------------------------------------------------
// Internal methods follow
// ------------------------------------------------------------
/// Open a MySQL connection from the `DATABASE_URL` environment variable.
///
/// Panics when the variable is unset or the connection fails — deliberate
/// fail-fast behavior that every route handler relies on.
fn db_connect() -> MysqlConnection {
    let database_url : String = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
    // FIX: `expect(&format!(..))` built the error string even on success
    // (clippy::expect_fun_call); `unwrap_or_else` defers it to the error path
    // and also surfaces the underlying connection error.
    MysqlConnection::establish(&database_url)
        .unwrap_or_else(|e| panic!("Error connecting to {}: {}", database_url, e)) // TODO Replace MysqlConnection with more generic "Connection"?
}
/// Adds a user to database.
///
/// Resolves the session by its random id, rejects a duplicate username
/// within the same session, then inserts the row. Returns the number of
/// inserted rows on success, or a human-readable error message.
fn db_add_user(conn: &MysqlConnection, username: &String, passwd: &String, salt: i32, attributes: &Vec<String>, key_material: &String, random_session_id: &String) -> Result<usize, String> {
    use schema::users;
    use schema::sessions;
    // Get primary key from sessions table
    let session: schema::Session = match sessions::table.filter(sessions::random_session_id.eq(random_session_id)).first::<schema::Session>(conn) {
        Ok(s) => s,
        Err(_e) => {return Err(format!("No session with random id {} present. Cannot add user.", random_session_id)); }
    };
    // Uniqueness check: a username may appear at most once per session.
    // NOTE(review): any query error (not only "not found") is treated as
    // "user absent" here — TODO confirm that is acceptable.
    match users::table
        .filter(users::username.eq(username.to_string()))
        .filter(users::session_id.eq(session.id))
        .first::<schema::User>(conn) {
        Ok(_u) => return Err("User already exists for this session".to_string()),
        Err(_e) => {}
    };
    let user = schema::NewUser {
        username: username.to_string(),
        password: passwd.to_string(), // TODO store salted hash of pwd.
        attributes: serde_json::to_string(attributes).unwrap(),
        key_material: key_material.to_string(),
        salt: salt,
        session_id: session.id
    };
    match diesel::insert_into(users::table)
        .values(&user)
        .execute(conn) {
        Ok(id) => Ok(id),
        Err(_e) => Err("Could not insert user".to_string())
    }
}
/// Create a new session row for `scheme`, storing the serialized key material
/// and attribute list. Returns the freshly generated random session id.
fn db_create_session(conn: &MysqlConnection, scheme: &String, key_material: &Vec<String>, attributes: &String) -> Result<String, String> {
    use schema::sessions;
    println!("Got scheme {}", scheme);
    // Validate the scheme name before touching the database.
    match scheme.parse::<SCHEMES>() {
        Ok(_scheme) => {
            // Random 64-bit id from the OS RNG, rendered in decimal; this id
            // doubles as the client's API key.
            let session_id: String = OsRng::new().unwrap().next_u64().to_string();
            let session = schema::NewSession {
                is_initialized: false,
                scheme: scheme.to_string(),
                random_session_id: session_id.clone(),
                key_material: serde_json::to_string(key_material).unwrap(),
                attributes: attributes.to_string()
            };
            println!("Key material is {}", session.key_material);
            // Return auto-gen'd session id
            match diesel::insert_into(sessions::table)
                .values(&session)
                .execute(conn) {
                Ok(_usize) => Ok(session_id),
                Err(_e) => Err("Could not insert into sessions".to_string())
            }
        }
        Err(_) => Err("Invalid scheme".to_string())
    }
}
/// Fetch the session whose `random_session_id` equals `api_key`.
/// Propagates the diesel error (e.g. NotFound) to the caller.
fn db_get_session_by_api_key(conn: &MysqlConnection, api_key: &String) -> Result<schema::Session, diesel::result::Error> {
    use schema::sessions;
    sessions::table.filter(sessions::random_session_id.eq(api_key))
        .first::<schema::Session>(conn)
}
/// Look up a user by name, returning `None` when absent.
///
/// NOTE(review): usernames are only unique per session (see db_add_user), so
/// this returns an arbitrary first match across sessions — confirm callers
/// are aware.
fn _db_get_user_by_username<'a>(conn: &MysqlConnection, user: &'a String) -> Option<schema::User> {
    use schema::users;
    // `.ok()` replaces the hand-written Ok/Err match; any diesel error maps
    // to None, exactly as before.
    users::table.filter(users::username.eq(user))
        .first::<schema::User>(conn)
        .ok()
}
/// Return all users attached to the session whose random id equals `api_key`.
///
/// Panics on a database error (internal helper; callers treat DB failure as
/// fatal).
fn _db_get_users_by_apikey<'a>(conn: &MysqlConnection, api_key: &String) -> Vec<schema::User> {
    use schema::sessions;
    use schema::users;
    users::table
        .inner_join(sessions::table)
        .filter(sessions::random_session_id.eq(api_key))
        .get_results::<(schema::User, schema::Session)>(conn)
        // FIX: `expect` does not interpolate `{}` — the old message shipped a
        // literal "{}"; build the message explicitly and include the cause.
        .unwrap_or_else(|e| panic!("Could not load users by API key {}: {}", api_key, e))
        .into_iter().map(|(user, _session)| user).collect()
}
/// TODO Use to create salted hashed passwords
///
/// Hashes `plain_password` concatenated with the decimal rendering of `salt`
/// using 64-byte Blake2b (no key).
fn _to_db_passwd(plain_password: String, salt: i32) -> Blake2bResult {
    // Same bytes as `plain_password + &salt.to_string()`, built in one step.
    let salted = format!("{}{}", plain_password, salt);
    blake2b(64, &[], salted.as_bytes())
}
/// Build the Rocket instance with every REST route mounted at "/".
/// Factored out so tests can spin up a local client against the same app.
fn rocket() -> rocket::Rocket {
    rocket::ignite().mount("/", routes![setup, list_attrs, encrypt, decrypt, add_user])
}
/// Entry point: launch the blocking Rocket HTTP server.
fn main() {
    rocket().launch();
}
/// Returns true if `key` is a valid API key string.
///
/// Accepts either a bare key or a `Bearer <key>` authorization value and
/// checks that at least one user is attached to a session with that id.
fn is_valid(api_key: String) -> bool {
    use schema::users;
    use schema::sessions;
    // Strip only a LEADING "Bearer " token. (The old `replace` would also
    // rewrite occurrences inside the key itself.)
    let k : String = if api_key.starts_with("Bearer ") {
        api_key["Bearer ".len()..].to_string()
    } else {
        api_key
    };
    let conn = db_connect();
    match users::table
        .inner_join(sessions::table)
        .filter(users::session_id.eq(sessions::id))
        .filter(sessions::random_session_id.eq(k))
        .count()
        .get_result::<i64>(&conn) {
        // BUGFIX: the original returned `true` whenever the COUNT query
        // succeeded — even with zero matching rows. Require at least one.
        Ok(n) => n > 0,
        Err(_e) => false
    }
}
// -----------------------------------------------
// Tests follow
// -----------------------------------------------
// NOTE(review): these tests require a live MySQL reachable via DATABASE_URL;
// they are integration tests, not unit tests.
#[cfg(test)]
mod tests {
    use super::rocket;
    use rocket::local::Client;
    use rocket::http::Status;
    use super::*;

    /// Round-trip: create a session, insert a user, read it back by name.
    #[test]
    fn test_db_user() {
        let con = db_connect();
        // make sure we have a session to test with
        let session_id: String = db_create_session(&con, &String::from("bsw"), &vec!["".to_string()], &"".to_string()).unwrap();
        // Write user into db
        let user: String = "bla".to_string();
        let passwd: String = "blubb".to_string();
        let key_material: String = "".to_string();
        let attributes: Vec<String>= vec!("".to_string());
        let salt: i32 = 1234;
        let result: usize = db_add_user(&con, &user, &passwd, salt, &attributes, &key_material, &session_id).unwrap();
        assert!(result > 0);
        // Check that it is there
        let u: schema::User = _db_get_user_by_username(&con, &user).unwrap();
        assert_eq!(u.username, user);
    }

    /// Creates real bsw key material, stores a session, then attaches a user.
    #[test]
    fn test_db_session() {
        let con = db_connect();
        // Create a user
        let user: String = "bla".to_string();
        let passwd: String = "blubb".to_string();
        let salt: i32 = 1234;
        // Setup of a new session. Create keys
        let key_gen_parms = KeyGenMsg {
            attributes: vec!["attribute_1".to_string(), "attribute_2".to_string()],
            scheme: "bsw".to_string()
        };
        let key_material: Vec<String> = _keygen(key_gen_parms).unwrap();
        let attributes: Vec<String> = vec!(String::from(""));
        let scheme: String = "bsw".to_string();
        let session_id: String = db_create_session(&con, &scheme, &key_material, &serde_json::to_string(&attributes).unwrap()).expect("Could not create session");
        println!("Got session id {}", session_id);
        db_add_user(&con, &user, &passwd, salt, &attributes, &serde_json::to_string(&key_material).unwrap(), &session_id).expect("Failure adding user");
    }

    /// Drives /setup and /add_user through a local Rocket client.
    #[test]
    fn test_setup() {
        let client = Client::new(rocket()).expect("valid rocket instance");
        println!("Have rocket");
        // Set up scheme
        let setup_msg: SetupMsg = SetupMsg {
            scheme: "bsw".to_string(),
            attributes: vec!("attribute_1".to_string(), "attribute_2".to_string())
        };
        let mut response = client.post("/setup")
            .header(ContentType::JSON)
            .body(serde_json::to_string(&json!(&setup_msg)).expect("Setting up bsw"))
            .dispatch();
        assert_eq!(response.status(), Status::Ok);
        let session_id: String = response.body_string().unwrap();
        println!("SETUP RETURNED {}", &session_id);
        // Create user
        let user = User {
            random_session_id: session_id,
            username : String::from("admin"),
            password : String::from("admin"),
            attributes: vec!("attribute_1".to_string())
        };
        let response_add: rocket::local::LocalResponse = client.post("/add_user")
            .header(ContentType::JSON)
            .body(serde_json::to_string(&json!(&user)).expect("Attribute serialization"))
            .dispatch();
        //assert_eq!(response_add.status(), Status::Ok);
    }

    /// Full round trip: setup -> add_user -> /encrypt -> /decrypt, asserting
    /// the decrypted plaintext matches the original.
    #[test]
    fn test_encrypt_decrypt() {
        let client = Client::new(rocket()).expect("valid rocket instance");
        // Set up scheme
        let setup_msg: SetupMsg = SetupMsg {
            scheme: "bsw".to_string(),
            attributes: vec!("attribute_1".to_string(), "attribute_2".to_string())
        };
        let mut response = client.post("/setup")
            .header(ContentType::JSON)
            .body(serde_json::to_string(&json!(&setup_msg)).expect("Setting up bsw"))
            .dispatch();
        assert_eq!(response.status(), Status::Ok);
        let session_id : &String = &response.body_string().unwrap();
        println!("Setup returned SessionID {}",session_id);
        // Create user
        let user = User {
            random_session_id: session_id.to_string(),
            username : String::from("admin"),
            password : String::from("admin"),
            attributes: vec!("attribute_1".to_string())
        };
        let _response_add = client.post("/add_user")
            .header(ContentType::JSON)
            .body(serde_json::to_string(&json!(&user)).expect("Attribute serialization"))
            .dispatch();
        // Encrypt some text for a policy satisfied by the user's attributes.
        let policy:String = String::from(r#"{"OR": [{"ATT": "attribute_1"}, {"ATT": "attribute_2"}]}"#);
        let msg : EncMessage = EncMessage {
            plaintext : "Encrypt me".into(),
            policy : policy,
            session_id : session_id.clone()
        };
        let mut resp_enc = client.post("/encrypt")
            .header(ContentType::JSON)
            .body(serde_json::to_string(&msg).expect("Encryption"))
            .dispatch();
        assert_eq!(resp_enc.status(), Status::Ok);
        let ct:String = resp_enc.body_string().unwrap();
        let ct_json = serde_json::from_str(&ct).unwrap();
        // Decrypt again
        let c : DecMessage = DecMessage {
            ct: ct_json,
            session_id: session_id.clone(),
            username: String::from("admin"),
            password: String::from("admin")
        };
        let mut resp_dec = client.post("/decrypt")
            .header(ContentType::JSON)
            .body(serde_json::to_string(&c).unwrap())
            .dispatch();
        let pt_hex: String = resp_dec.body_string().unwrap();
        println!("HEX: {}", pt_hex);
        let mut pt: String = serde_json::from_str(&pt_hex).expect("From json");
        pt = pt.trim().to_string();
        println!("RESULT: {}", pt);
        assert_eq!(pt, "Encrypt me");
    }
}
|
return Outcome::Failure((Status::Unauthorized, ()));
}
return Outcome::Success(ApiKey(key.to_string()));
}
|
random_line_split
|
server.rs
|
// Copyright (c) 2019 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
//! Websocket server [handshake].
//!
//! [handshake]: https://tools.ietf.org/html/rfc6455#section-4
use super::{
append_extensions, configure_extensions, expect_ascii_header, with_first_header, Error, WebSocketKey,
MAX_NUM_HEADERS, SEC_WEBSOCKET_EXTENSIONS, SEC_WEBSOCKET_PROTOCOL,
};
use crate::connection::{self, Mode};
use crate::extension::Extension;
use bytes::BytesMut;
use futures::prelude::*;
use std::{mem, str};
// Most HTTP servers default to 8KB limit on headers
const MAX_HEADERS_SIZE: usize = 8 * 1024;
const BLOCK_SIZE: usize = 8 * 1024;
/// Websocket handshake server.
#[derive(Debug)]
pub struct Server<'a, T> {
socket: T,
/// Protocols the server supports.
protocols: Vec<&'a str>,
/// Extensions the server supports.
extensions: Vec<Box<dyn Extension + Send>>,
/// Encoding/decoding buffer.
buffer: BytesMut,
}
impl<'a, T: AsyncRead + AsyncWrite + Unpin> Server<'a, T> {
/// Create a new server handshake.
pub fn new(socket: T) -> Self {
Server { socket, protocols: Vec::new(), extensions: Vec::new(), buffer: BytesMut::new() }
}
/// Override the buffer to use for request/response handling.
pub fn set_buffer(&mut self, b: BytesMut) -> &mut Self {
self.buffer = b;
self
}
/// Extract the buffer.
pub fn take_buffer(&mut self) -> BytesMut {
mem::take(&mut self.buffer)
}
/// Add a protocol the server supports.
pub fn add_protocol(&mut self, p: &'a str) -> &mut Self {
self.protocols.push(p);
self
}
/// Add an extension the server supports.
pub fn add_extension(&mut self, e: Box<dyn Extension + Send>) -> &mut Self {
self.extensions.push(e);
self
}
/// Get back all extensions.
pub fn drain_extensions(&mut self) -> impl Iterator<Item = Box<dyn Extension + Send>> + '_ {
self.extensions.drain(..)
}
/// Await an incoming client handshake request.
pub async fn receive_request(&mut self) -> Result<ClientRequest<'_>, Error> {
self.buffer.clear();
let mut skip = 0;
loop {
crate::read(&mut self.socket, &mut self.buffer, BLOCK_SIZE).await?;
let limit = std::cmp::min(self.buffer.len(), MAX_HEADERS_SIZE);
// We don't expect body, so can search for the CRLF headers tail from
// the end of the buffer.
if self.buffer[skip..limit].windows(4).rev().any(|w| w == b"\r\n\r\n") {
break;
}
// Give up if we've reached the limit. We could emit a specific error here,
// but httparse will produce meaningful error for us regardless.
if limit == MAX_HEADERS_SIZE {
break;
}
// Skip bytes that did not contain CRLF in the next iteration.
// If we only read a partial CRLF sequence, we would miss it if we skipped the full buffer
// length, hence backing off the full 4 bytes.
skip = self.buffer.len().saturating_sub(4);
}
self.decode_request()
}
/// Respond to the client.
pub async fn send_response(&mut self, r: &Response<'_>) -> Result<(), Error> {
self.buffer.clear();
self.encode_response(r);
self.socket.write_all(&self.buffer).await?;
self.socket.flush().await?;
self.buffer.clear();
Ok(())
}
/// Turn this handshake into a [`connection::Builder`].
pub fn into_builder(mut self) -> connection::Builder<T> {
let mut builder = connection::Builder::new(self.socket, Mode::Server);
builder.set_buffer(self.buffer);
builder.add_extensions(self.extensions.drain(..));
builder
}
/// Get out the inner socket of the server.
pub fn into_inner(self) -> T {
|
let mut header_buf = [httparse::EMPTY_HEADER; MAX_NUM_HEADERS];
let mut request = httparse::Request::new(&mut header_buf);
match request.parse(self.buffer.as_ref()) {
Ok(httparse::Status::Complete(_)) => (),
Ok(httparse::Status::Partial) => return Err(Error::IncompleteHttpRequest),
Err(e) => return Err(Error::Http(Box::new(e))),
};
if request.method!= Some("GET") {
return Err(Error::InvalidRequestMethod);
}
if request.version!= Some(1) {
return Err(Error::UnsupportedHttpVersion);
}
let host = with_first_header(&request.headers, "Host", Ok)?;
expect_ascii_header(request.headers, "Upgrade", "websocket")?;
expect_ascii_header(request.headers, "Connection", "upgrade")?;
expect_ascii_header(request.headers, "Sec-WebSocket-Version", "13")?;
let origin =
request.headers.iter().find_map(
|h| {
if h.name.eq_ignore_ascii_case("Origin") {
Some(h.value)
} else {
None
}
},
);
let headers = RequestHeaders { host, origin };
let ws_key = with_first_header(&request.headers, "Sec-WebSocket-Key", |k| {
WebSocketKey::try_from(k).map_err(|_| Error::SecWebSocketKeyInvalidLength(k.len()))
})?;
for h in request.headers.iter().filter(|h| h.name.eq_ignore_ascii_case(SEC_WEBSOCKET_EXTENSIONS)) {
configure_extensions(&mut self.extensions, std::str::from_utf8(h.value)?)?
}
let mut protocols = Vec::new();
for p in request.headers.iter().filter(|h| h.name.eq_ignore_ascii_case(SEC_WEBSOCKET_PROTOCOL)) {
if let Some(&p) = self.protocols.iter().find(|x| x.as_bytes() == p.value) {
protocols.push(p)
}
}
let path = request.path.unwrap_or("/");
Ok(ClientRequest { ws_key, protocols, path, headers })
}
// Encode server handshake response.
fn encode_response(&mut self, response: &Response<'_>) {
match response {
Response::Accept { key, protocol } => {
let accept_value = super::generate_accept_key(&key);
self.buffer.extend_from_slice(
concat![
"HTTP/1.1 101 Switching Protocols",
"\r\nServer: soketto-",
env!("CARGO_PKG_VERSION"),
"\r\nUpgrade: websocket",
"\r\nConnection: upgrade",
"\r\nSec-WebSocket-Accept: ",
]
.as_bytes(),
);
self.buffer.extend_from_slice(&accept_value);
if let Some(p) = protocol {
self.buffer.extend_from_slice(b"\r\nSec-WebSocket-Protocol: ");
self.buffer.extend_from_slice(p.as_bytes())
}
append_extensions(self.extensions.iter().filter(|e| e.is_enabled()), &mut self.buffer);
self.buffer.extend_from_slice(b"\r\n\r\n")
}
Response::Reject { status_code } => {
self.buffer.extend_from_slice(b"HTTP/1.1 ");
let (_, reason) = if let Ok(i) = STATUSCODES.binary_search_by_key(status_code, |(n, _)| *n) {
STATUSCODES[i]
} else {
(500, "500 Internal Server Error")
};
self.buffer.extend_from_slice(reason.as_bytes());
self.buffer.extend_from_slice(b"\r\n\r\n")
}
}
}
}
/// Handshake request received from the client.
#[derive(Debug)]
pub struct ClientRequest<'a> {
ws_key: WebSocketKey,
protocols: Vec<&'a str>,
path: &'a str,
headers: RequestHeaders<'a>,
}
/// Select HTTP headers sent by the client.
#[derive(Debug, Copy, Clone)]
pub struct RequestHeaders<'a> {
/// The [`Host`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Host) header.
pub host: &'a [u8],
/// The [`Origin`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin) header, if provided.
pub origin: Option<&'a [u8]>,
}
impl<'a> ClientRequest<'a> {
/// The `Sec-WebSocket-Key` header nonce value.
pub fn key(&self) -> WebSocketKey {
self.ws_key
}
/// The protocols the client is proposing.
pub fn protocols(&self) -> impl Iterator<Item = &str> {
self.protocols.iter().cloned()
}
/// The path the client is requesting.
pub fn path(&self) -> &str {
self.path
}
/// Select HTTP headers sent by the client.
pub fn headers(&self) -> RequestHeaders {
self.headers
}
}
/// Handshake response the server sends back to the client.
#[derive(Debug)]
pub enum Response<'a> {
/// The server accepts the handshake request.
Accept { key: WebSocketKey, protocol: Option<&'a str> },
/// The server rejects the handshake request.
Reject { status_code: u16 },
}
/// Known status codes and their reason phrases.
const STATUSCODES: &[(u16, &str)] = &[
(100, "100 Continue"),
(101, "101 Switching Protocols"),
(102, "102 Processing"),
(200, "200 OK"),
(201, "201 Created"),
(202, "202 Accepted"),
(203, "203 Non Authoritative Information"),
(204, "204 No Content"),
(205, "205 Reset Content"),
(206, "206 Partial Content"),
(207, "207 Multi-Status"),
(208, "208 Already Reported"),
(226, "226 IM Used"),
(300, "300 Multiple Choices"),
(301, "301 Moved Permanently"),
(302, "302 Found"),
(303, "303 See Other"),
(304, "304 Not Modified"),
(305, "305 Use Proxy"),
(307, "307 Temporary Redirect"),
(308, "308 Permanent Redirect"),
(400, "400 Bad Request"),
(401, "401 Unauthorized"),
(402, "402 Payment Required"),
(403, "403 Forbidden"),
(404, "404 Not Found"),
(405, "405 Method Not Allowed"),
(406, "406 Not Acceptable"),
(407, "407 Proxy Authentication Required"),
(408, "408 Request Timeout"),
(409, "409 Conflict"),
(410, "410 Gone"),
(411, "411 Length Required"),
(412, "412 Precondition Failed"),
(413, "413 Payload Too Large"),
(414, "414 URI Too Long"),
(415, "415 Unsupported Media Type"),
(416, "416 Range Not Satisfiable"),
(417, "417 Expectation Failed"),
(418, "418 I'm a teapot"),
(421, "421 Misdirected Request"),
(422, "422 Unprocessable Entity"),
(423, "423 Locked"),
(424, "424 Failed Dependency"),
(426, "426 Upgrade Required"),
(428, "428 Precondition Required"),
(429, "429 Too Many Requests"),
(431, "431 Request Header Fields Too Large"),
(451, "451 Unavailable For Legal Reasons"),
(500, "500 Internal Server Error"),
(501, "501 Not Implemented"),
(502, "502 Bad Gateway"),
(503, "503 Service Unavailable"),
(504, "504 Gateway Timeout"),
(505, "505 HTTP Version Not Supported"),
(506, "506 Variant Also Negotiates"),
(507, "507 Insufficient Storage"),
(508, "508 Loop Detected"),
(510, "510 Not Extended"),
(511, "511 Network Authentication Required"),
];
|
self.socket
}
// Decode client handshake request.
fn decode_request(&mut self) -> Result<ClientRequest, Error> {
|
random_line_split
|
server.rs
|
// Copyright (c) 2019 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
//! Websocket server [handshake].
//!
//! [handshake]: https://tools.ietf.org/html/rfc6455#section-4
use super::{
append_extensions, configure_extensions, expect_ascii_header, with_first_header, Error, WebSocketKey,
MAX_NUM_HEADERS, SEC_WEBSOCKET_EXTENSIONS, SEC_WEBSOCKET_PROTOCOL,
};
use crate::connection::{self, Mode};
use crate::extension::Extension;
use bytes::BytesMut;
use futures::prelude::*;
use std::{mem, str};
// Most HTTP servers default to 8KB limit on headers
const MAX_HEADERS_SIZE: usize = 8 * 1024;
// Chunk size for each socket read while waiting for the full request head.
const BLOCK_SIZE: usize = 8 * 1024;
/// Websocket handshake server.
///
/// Wraps a raw socket and drives the server side of the opening handshake
/// before the connection is handed off via `into_builder`.
#[derive(Debug)]
pub struct Server<'a, T> {
    socket: T,
    /// Protocols the server supports.
    protocols: Vec<&'a str>,
    /// Extensions the server supports.
    extensions: Vec<Box<dyn Extension + Send>>,
    /// Encoding/decoding buffer, reused across request and response.
    buffer: BytesMut,
}
impl<'a, T: AsyncRead + AsyncWrite + Unpin> Server<'a, T> {
    /// Create a new server handshake.
    pub fn new(socket: T) -> Self {
        Server { socket, protocols: Vec::new(), extensions: Vec::new(), buffer: BytesMut::new() }
    }

    /// Override the buffer to use for request/response handling.
    pub fn set_buffer(&mut self, b: BytesMut) -> &mut Self {
        self.buffer = b;
        self
    }

    /// Extract the buffer, leaving an empty one in its place.
    pub fn take_buffer(&mut self) -> BytesMut {
        mem::take(&mut self.buffer)
    }

    /// Add a protocol the server supports.
    pub fn add_protocol(&mut self, p: &'a str) -> &mut Self {
        self.protocols.push(p);
        self
    }

    /// Add an extension the server supports.
    pub fn add_extension(&mut self, e: Box<dyn Extension + Send>) -> &mut Self {
        self.extensions.push(e);
        self
    }

    /// Get back all extensions (drains the internal list).
    pub fn drain_extensions(&mut self) -> impl Iterator<Item = Box<dyn Extension + Send>> + '_ {
        self.extensions.drain(..)
    }
    /// Await an incoming client handshake request.
    ///
    /// Reads from the socket in `BLOCK_SIZE` chunks until the end-of-headers
    /// marker (`\r\n\r\n`) appears within the first `MAX_HEADERS_SIZE` bytes,
    /// then parses the accumulated bytes.
    pub async fn receive_request(&mut self) -> Result<ClientRequest<'_>, Error> {
        self.buffer.clear();
        // Offset of the first byte not yet scanned for the CRLF-CRLF marker.
        let mut skip = 0;
        loop {
            crate::read(&mut self.socket, &mut self.buffer, BLOCK_SIZE).await?;
            let limit = std::cmp::min(self.buffer.len(), MAX_HEADERS_SIZE);
            // We don't expect body, so can search for the CRLF headers tail from
            // the end of the buffer.
            if self.buffer[skip..limit].windows(4).rev().any(|w| w == b"\r\n\r\n") {
                break;
            }
            // Give up if we've reached the limit. We could emit a specific error here,
            // but httparse will produce meaningful error for us regardless.
            if limit == MAX_HEADERS_SIZE {
                break;
            }
            // Skip bytes that did not contain CRLF in the next iteration.
            // If we only read a partial CRLF sequence, we would miss it if we skipped the full buffer
            // length, hence backing off the full 4 bytes.
            skip = self.buffer.len().saturating_sub(4);
        }
        self.decode_request()
    }
    /// Respond to the client.
    ///
    /// Encodes `r` into the internal buffer, writes and flushes it, then
    /// clears the buffer again so it can be reused for the data connection.
    pub async fn send_response(&mut self, r: &Response<'_>) -> Result<(), Error> {
        self.buffer.clear();
        self.encode_response(r);
        self.socket.write_all(&self.buffer).await?;
        self.socket.flush().await?;
        self.buffer.clear();
        Ok(())
    }
    /// Turn this handshake into a [`connection::Builder`].
    ///
    /// The buffer and all registered extensions are carried over into the
    /// builder; the server value is consumed.
    pub fn into_builder(mut self) -> connection::Builder<T> {
        let mut builder = connection::Builder::new(self.socket, Mode::Server);
        builder.set_buffer(self.buffer);
        builder.add_extensions(self.extensions.drain(..));
        builder
    }

    /// Get out the inner socket of the server.
    pub fn into_inner(self) -> T {
        self.socket
    }
// Decode client handshake request.
fn decode_request(&mut self) -> Result<ClientRequest, Error> {
let mut header_buf = [httparse::EMPTY_HEADER; MAX_NUM_HEADERS];
let mut request = httparse::Request::new(&mut header_buf);
match request.parse(self.buffer.as_ref()) {
Ok(httparse::Status::Complete(_)) => (),
Ok(httparse::Status::Partial) => return Err(Error::IncompleteHttpRequest),
Err(e) => return Err(Error::Http(Box::new(e))),
};
if request.method!= Some("GET") {
return Err(Error::InvalidRequestMethod);
}
if request.version!= Some(1) {
return Err(Error::UnsupportedHttpVersion);
}
let host = with_first_header(&request.headers, "Host", Ok)?;
expect_ascii_header(request.headers, "Upgrade", "websocket")?;
expect_ascii_header(request.headers, "Connection", "upgrade")?;
expect_ascii_header(request.headers, "Sec-WebSocket-Version", "13")?;
let origin =
request.headers.iter().find_map(
|h| {
if h.name.eq_ignore_ascii_case("Origin") {
Some(h.value)
} else {
None
}
},
);
let headers = RequestHeaders { host, origin };
let ws_key = with_first_header(&request.headers, "Sec-WebSocket-Key", |k| {
WebSocketKey::try_from(k).map_err(|_| Error::SecWebSocketKeyInvalidLength(k.len()))
})?;
for h in request.headers.iter().filter(|h| h.name.eq_ignore_ascii_case(SEC_WEBSOCKET_EXTENSIONS)) {
configure_extensions(&mut self.extensions, std::str::from_utf8(h.value)?)?
}
let mut protocols = Vec::new();
for p in request.headers.iter().filter(|h| h.name.eq_ignore_ascii_case(SEC_WEBSOCKET_PROTOCOL)) {
if let Some(&p) = self.protocols.iter().find(|x| x.as_bytes() == p.value)
|
}
let path = request.path.unwrap_or("/");
Ok(ClientRequest { ws_key, protocols, path, headers })
}
    // Encode server handshake response into `self.buffer`.
    //
    // NOTE: the Reject arm writes only "HTTP/1.1 " plus the reason phrase;
    // this works because every STATUSCODES phrase begins with the numeric
    // code (e.g. "400 Bad Request").
    fn encode_response(&mut self, response: &Response<'_>) {
        match response {
            Response::Accept { key, protocol } => {
                // Compute the Sec-WebSocket-Accept digest from the client key.
                let accept_value = super::generate_accept_key(&key);
                self.buffer.extend_from_slice(
                    concat![
                        "HTTP/1.1 101 Switching Protocols",
                        "\r\nServer: soketto-",
                        env!("CARGO_PKG_VERSION"),
                        "\r\nUpgrade: websocket",
                        "\r\nConnection: upgrade",
                        "\r\nSec-WebSocket-Accept: ",
                    ]
                    .as_bytes(),
                );
                self.buffer.extend_from_slice(&accept_value);
                if let Some(p) = protocol {
                    self.buffer.extend_from_slice(b"\r\nSec-WebSocket-Protocol: ");
                    self.buffer.extend_from_slice(p.as_bytes())
                }
                // Only extensions that negotiated successfully are advertised.
                append_extensions(self.extensions.iter().filter(|e| e.is_enabled()), &mut self.buffer);
                self.buffer.extend_from_slice(b"\r\n\r\n")
            }
            Response::Reject { status_code } => {
                self.buffer.extend_from_slice(b"HTTP/1.1 ");
                // Binary search requires STATUSCODES to stay sorted by code;
                // unknown codes fall back to 500.
                let (_, reason) = if let Ok(i) = STATUSCODES.binary_search_by_key(status_code, |(n, _)| *n) {
                    STATUSCODES[i]
                } else {
                    (500, "500 Internal Server Error")
                };
                self.buffer.extend_from_slice(reason.as_bytes());
                self.buffer.extend_from_slice(b"\r\n\r\n")
            }
        }
    }
}
/// Handshake request received from the client.
#[derive(Debug)]
pub struct ClientRequest<'a> {
    ws_key: WebSocketKey,
    // Only protocols the server also registered via `add_protocol` are kept
    // here (they are intersected during request decoding).
    protocols: Vec<&'a str>,
    path: &'a str,
    headers: RequestHeaders<'a>,
}

/// Select HTTP headers sent by the client.
#[derive(Debug, Copy, Clone)]
pub struct RequestHeaders<'a> {
    /// The [`Host`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Host) header.
    pub host: &'a [u8],
    /// The [`Origin`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin) header, if provided.
    pub origin: Option<&'a [u8]>,
}

impl<'a> ClientRequest<'a> {
    /// The `Sec-WebSocket-Key` header nonce value.
    pub fn key(&self) -> WebSocketKey {
        self.ws_key
    }

    /// The protocols the client is proposing (limited to ones this server
    /// supports).
    pub fn protocols(&self) -> impl Iterator<Item = &str> {
        self.protocols.iter().cloned()
    }

    /// The path the client is requesting.
    pub fn path(&self) -> &str {
        self.path
    }

    /// Select HTTP headers sent by the client.
    pub fn headers(&self) -> RequestHeaders {
        self.headers
    }
}

/// Handshake response the server sends back to the client.
#[derive(Debug)]
pub enum Response<'a> {
    /// The server accepts the handshake request.
    Accept { key: WebSocketKey, protocol: Option<&'a str> },
    /// The server rejects the handshake request.
    Reject { status_code: u16 },
}
/// Known status codes and their reason phrases.
///
/// INVARIANT: entries must stay sorted ascending by status code — the Reject
/// path in `encode_response` locates them with `binary_search_by_key`.
const STATUSCODES: &[(u16, &str)] = &[
    (100, "100 Continue"),
    (101, "101 Switching Protocols"),
    (102, "102 Processing"),
    (200, "200 OK"),
    (201, "201 Created"),
    (202, "202 Accepted"),
    (203, "203 Non Authoritative Information"),
    (204, "204 No Content"),
    (205, "205 Reset Content"),
    (206, "206 Partial Content"),
    (207, "207 Multi-Status"),
    (208, "208 Already Reported"),
    (226, "226 IM Used"),
    (300, "300 Multiple Choices"),
    (301, "301 Moved Permanently"),
    (302, "302 Found"),
    (303, "303 See Other"),
    (304, "304 Not Modified"),
    (305, "305 Use Proxy"),
    (307, "307 Temporary Redirect"),
    (308, "308 Permanent Redirect"),
    (400, "400 Bad Request"),
    (401, "401 Unauthorized"),
    (402, "402 Payment Required"),
    (403, "403 Forbidden"),
    (404, "404 Not Found"),
    (405, "405 Method Not Allowed"),
    (406, "406 Not Acceptable"),
    (407, "407 Proxy Authentication Required"),
    (408, "408 Request Timeout"),
    (409, "409 Conflict"),
    (410, "410 Gone"),
    (411, "411 Length Required"),
    (412, "412 Precondition Failed"),
    (413, "413 Payload Too Large"),
    (414, "414 URI Too Long"),
    (415, "415 Unsupported Media Type"),
    (416, "416 Range Not Satisfiable"),
    (417, "417 Expectation Failed"),
    (418, "418 I'm a teapot"),
    (421, "421 Misdirected Request"),
    (422, "422 Unprocessable Entity"),
    (423, "423 Locked"),
    (424, "424 Failed Dependency"),
    (426, "426 Upgrade Required"),
    (428, "428 Precondition Required"),
    (429, "429 Too Many Requests"),
    (431, "431 Request Header Fields Too Large"),
    (451, "451 Unavailable For Legal Reasons"),
    (500, "500 Internal Server Error"),
    (501, "501 Not Implemented"),
    (502, "502 Bad Gateway"),
    (503, "503 Service Unavailable"),
    (504, "504 Gateway Timeout"),
    (505, "505 HTTP Version Not Supported"),
    (506, "506 Variant Also Negotiates"),
    (507, "507 Insufficient Storage"),
    (508, "508 Loop Detected"),
    (510, "510 Not Extended"),
    (511, "511 Network Authentication Required"),
];
|
{
protocols.push(p)
}
|
conditional_block
|
server.rs
|
// Copyright (c) 2019 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
//! Websocket server [handshake].
//!
//! [handshake]: https://tools.ietf.org/html/rfc6455#section-4
use super::{
append_extensions, configure_extensions, expect_ascii_header, with_first_header, Error, WebSocketKey,
MAX_NUM_HEADERS, SEC_WEBSOCKET_EXTENSIONS, SEC_WEBSOCKET_PROTOCOL,
};
use crate::connection::{self, Mode};
use crate::extension::Extension;
use bytes::BytesMut;
use futures::prelude::*;
use std::{mem, str};
// Most HTTP servers default to 8KB limit on headers
const MAX_HEADERS_SIZE: usize = 8 * 1024;
const BLOCK_SIZE: usize = 8 * 1024;
/// Websocket handshake server.
#[derive(Debug)]
pub struct Server<'a, T> {
socket: T,
/// Protocols the server supports.
protocols: Vec<&'a str>,
/// Extensions the server supports.
extensions: Vec<Box<dyn Extension + Send>>,
/// Encoding/decoding buffer.
buffer: BytesMut,
}
impl<'a, T: AsyncRead + AsyncWrite + Unpin> Server<'a, T> {
/// Create a new server handshake.
pub fn new(socket: T) -> Self {
Server { socket, protocols: Vec::new(), extensions: Vec::new(), buffer: BytesMut::new() }
}
/// Override the buffer to use for request/response handling.
pub fn set_buffer(&mut self, b: BytesMut) -> &mut Self {
self.buffer = b;
self
}
/// Extract the buffer.
pub fn take_buffer(&mut self) -> BytesMut {
mem::take(&mut self.buffer)
}
/// Add a protocol the server supports.
pub fn add_protocol(&mut self, p: &'a str) -> &mut Self {
self.protocols.push(p);
self
}
/// Add an extension the server supports.
pub fn add_extension(&mut self, e: Box<dyn Extension + Send>) -> &mut Self {
self.extensions.push(e);
self
}
/// Get back all extensions.
pub fn drain_extensions(&mut self) -> impl Iterator<Item = Box<dyn Extension + Send>> + '_
|
/// Await an incoming client handshake request.
pub async fn receive_request(&mut self) -> Result<ClientRequest<'_>, Error> {
self.buffer.clear();
let mut skip = 0;
loop {
crate::read(&mut self.socket, &mut self.buffer, BLOCK_SIZE).await?;
let limit = std::cmp::min(self.buffer.len(), MAX_HEADERS_SIZE);
// We don't expect body, so can search for the CRLF headers tail from
// the end of the buffer.
if self.buffer[skip..limit].windows(4).rev().any(|w| w == b"\r\n\r\n") {
break;
}
// Give up if we've reached the limit. We could emit a specific error here,
// but httparse will produce meaningful error for us regardless.
if limit == MAX_HEADERS_SIZE {
break;
}
// Skip bytes that did not contain CRLF in the next iteration.
// If we only read a partial CRLF sequence, we would miss it if we skipped the full buffer
// length, hence backing off the full 4 bytes.
skip = self.buffer.len().saturating_sub(4);
}
self.decode_request()
}
/// Respond to the client.
pub async fn send_response(&mut self, r: &Response<'_>) -> Result<(), Error> {
self.buffer.clear();
self.encode_response(r);
self.socket.write_all(&self.buffer).await?;
self.socket.flush().await?;
self.buffer.clear();
Ok(())
}
/// Turn this handshake into a [`connection::Builder`].
pub fn into_builder(mut self) -> connection::Builder<T> {
let mut builder = connection::Builder::new(self.socket, Mode::Server);
builder.set_buffer(self.buffer);
builder.add_extensions(self.extensions.drain(..));
builder
}
/// Get out the inner socket of the server.
pub fn into_inner(self) -> T {
self.socket
}
// Decode client handshake request.
fn decode_request(&mut self) -> Result<ClientRequest, Error> {
let mut header_buf = [httparse::EMPTY_HEADER; MAX_NUM_HEADERS];
let mut request = httparse::Request::new(&mut header_buf);
match request.parse(self.buffer.as_ref()) {
Ok(httparse::Status::Complete(_)) => (),
Ok(httparse::Status::Partial) => return Err(Error::IncompleteHttpRequest),
Err(e) => return Err(Error::Http(Box::new(e))),
};
if request.method!= Some("GET") {
return Err(Error::InvalidRequestMethod);
}
if request.version!= Some(1) {
return Err(Error::UnsupportedHttpVersion);
}
let host = with_first_header(&request.headers, "Host", Ok)?;
expect_ascii_header(request.headers, "Upgrade", "websocket")?;
expect_ascii_header(request.headers, "Connection", "upgrade")?;
expect_ascii_header(request.headers, "Sec-WebSocket-Version", "13")?;
let origin =
request.headers.iter().find_map(
|h| {
if h.name.eq_ignore_ascii_case("Origin") {
Some(h.value)
} else {
None
}
},
);
let headers = RequestHeaders { host, origin };
let ws_key = with_first_header(&request.headers, "Sec-WebSocket-Key", |k| {
WebSocketKey::try_from(k).map_err(|_| Error::SecWebSocketKeyInvalidLength(k.len()))
})?;
for h in request.headers.iter().filter(|h| h.name.eq_ignore_ascii_case(SEC_WEBSOCKET_EXTENSIONS)) {
configure_extensions(&mut self.extensions, std::str::from_utf8(h.value)?)?
}
let mut protocols = Vec::new();
for p in request.headers.iter().filter(|h| h.name.eq_ignore_ascii_case(SEC_WEBSOCKET_PROTOCOL)) {
if let Some(&p) = self.protocols.iter().find(|x| x.as_bytes() == p.value) {
protocols.push(p)
}
}
let path = request.path.unwrap_or("/");
Ok(ClientRequest { ws_key, protocols, path, headers })
}
// Encode server handshake response.
fn encode_response(&mut self, response: &Response<'_>) {
match response {
Response::Accept { key, protocol } => {
let accept_value = super::generate_accept_key(&key);
self.buffer.extend_from_slice(
concat![
"HTTP/1.1 101 Switching Protocols",
"\r\nServer: soketto-",
env!("CARGO_PKG_VERSION"),
"\r\nUpgrade: websocket",
"\r\nConnection: upgrade",
"\r\nSec-WebSocket-Accept: ",
]
.as_bytes(),
);
self.buffer.extend_from_slice(&accept_value);
if let Some(p) = protocol {
self.buffer.extend_from_slice(b"\r\nSec-WebSocket-Protocol: ");
self.buffer.extend_from_slice(p.as_bytes())
}
append_extensions(self.extensions.iter().filter(|e| e.is_enabled()), &mut self.buffer);
self.buffer.extend_from_slice(b"\r\n\r\n")
}
Response::Reject { status_code } => {
self.buffer.extend_from_slice(b"HTTP/1.1 ");
let (_, reason) = if let Ok(i) = STATUSCODES.binary_search_by_key(status_code, |(n, _)| *n) {
STATUSCODES[i]
} else {
(500, "500 Internal Server Error")
};
self.buffer.extend_from_slice(reason.as_bytes());
self.buffer.extend_from_slice(b"\r\n\r\n")
}
}
}
}
/// Handshake request received from the client.
#[derive(Debug)]
pub struct ClientRequest<'a> {
ws_key: WebSocketKey,
protocols: Vec<&'a str>,
path: &'a str,
headers: RequestHeaders<'a>,
}
/// Select HTTP headers sent by the client.
#[derive(Debug, Copy, Clone)]
pub struct RequestHeaders<'a> {
/// The [`Host`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Host) header.
pub host: &'a [u8],
/// The [`Origin`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin) header, if provided.
pub origin: Option<&'a [u8]>,
}
impl<'a> ClientRequest<'a> {
/// The `Sec-WebSocket-Key` header nonce value.
pub fn key(&self) -> WebSocketKey {
self.ws_key
}
/// The protocols the client is proposing.
pub fn protocols(&self) -> impl Iterator<Item = &str> {
self.protocols.iter().cloned()
}
/// The path the client is requesting.
pub fn path(&self) -> &str {
self.path
}
/// Select HTTP headers sent by the client.
pub fn headers(&self) -> RequestHeaders {
self.headers
}
}
/// Handshake response the server sends back to the client.
#[derive(Debug)]
pub enum Response<'a> {
/// The server accepts the handshake request.
Accept { key: WebSocketKey, protocol: Option<&'a str> },
/// The server rejects the handshake request.
Reject { status_code: u16 },
}
/// Known status codes and their reason phrases.
const STATUSCODES: &[(u16, &str)] = &[
(100, "100 Continue"),
(101, "101 Switching Protocols"),
(102, "102 Processing"),
(200, "200 OK"),
(201, "201 Created"),
(202, "202 Accepted"),
(203, "203 Non Authoritative Information"),
(204, "204 No Content"),
(205, "205 Reset Content"),
(206, "206 Partial Content"),
(207, "207 Multi-Status"),
(208, "208 Already Reported"),
(226, "226 IM Used"),
(300, "300 Multiple Choices"),
(301, "301 Moved Permanently"),
(302, "302 Found"),
(303, "303 See Other"),
(304, "304 Not Modified"),
(305, "305 Use Proxy"),
(307, "307 Temporary Redirect"),
(308, "308 Permanent Redirect"),
(400, "400 Bad Request"),
(401, "401 Unauthorized"),
(402, "402 Payment Required"),
(403, "403 Forbidden"),
(404, "404 Not Found"),
(405, "405 Method Not Allowed"),
(406, "406 Not Acceptable"),
(407, "407 Proxy Authentication Required"),
(408, "408 Request Timeout"),
(409, "409 Conflict"),
(410, "410 Gone"),
(411, "411 Length Required"),
(412, "412 Precondition Failed"),
(413, "413 Payload Too Large"),
(414, "414 URI Too Long"),
(415, "415 Unsupported Media Type"),
(416, "416 Range Not Satisfiable"),
(417, "417 Expectation Failed"),
(418, "418 I'm a teapot"),
(421, "421 Misdirected Request"),
(422, "422 Unprocessable Entity"),
(423, "423 Locked"),
(424, "424 Failed Dependency"),
(426, "426 Upgrade Required"),
(428, "428 Precondition Required"),
(429, "429 Too Many Requests"),
(431, "431 Request Header Fields Too Large"),
(451, "451 Unavailable For Legal Reasons"),
(500, "500 Internal Server Error"),
(501, "501 Not Implemented"),
(502, "502 Bad Gateway"),
(503, "503 Service Unavailable"),
(504, "504 Gateway Timeout"),
(505, "505 HTTP Version Not Supported"),
(506, "506 Variant Also Negotiates"),
(507, "507 Insufficient Storage"),
(508, "508 Loop Detected"),
(510, "510 Not Extended"),
(511, "511 Network Authentication Required"),
];
|
{
self.extensions.drain(..)
}
|
identifier_body
|
server.rs
|
// Copyright (c) 2019 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
//! Websocket server [handshake].
//!
//! [handshake]: https://tools.ietf.org/html/rfc6455#section-4
use super::{
append_extensions, configure_extensions, expect_ascii_header, with_first_header, Error, WebSocketKey,
MAX_NUM_HEADERS, SEC_WEBSOCKET_EXTENSIONS, SEC_WEBSOCKET_PROTOCOL,
};
use crate::connection::{self, Mode};
use crate::extension::Extension;
use bytes::BytesMut;
use futures::prelude::*;
use std::{mem, str};
// Most HTTP servers default to 8KB limit on headers
const MAX_HEADERS_SIZE: usize = 8 * 1024;
const BLOCK_SIZE: usize = 8 * 1024;
/// Websocket handshake server.
#[derive(Debug)]
pub struct
|
<'a, T> {
socket: T,
/// Protocols the server supports.
protocols: Vec<&'a str>,
/// Extensions the server supports.
extensions: Vec<Box<dyn Extension + Send>>,
/// Encoding/decoding buffer.
buffer: BytesMut,
}
impl<'a, T: AsyncRead + AsyncWrite + Unpin> Server<'a, T> {
/// Create a new server handshake.
pub fn new(socket: T) -> Self {
Server { socket, protocols: Vec::new(), extensions: Vec::new(), buffer: BytesMut::new() }
}
/// Override the buffer to use for request/response handling.
pub fn set_buffer(&mut self, b: BytesMut) -> &mut Self {
self.buffer = b;
self
}
/// Extract the buffer.
pub fn take_buffer(&mut self) -> BytesMut {
mem::take(&mut self.buffer)
}
/// Add a protocol the server supports.
pub fn add_protocol(&mut self, p: &'a str) -> &mut Self {
self.protocols.push(p);
self
}
/// Add an extension the server supports.
pub fn add_extension(&mut self, e: Box<dyn Extension + Send>) -> &mut Self {
self.extensions.push(e);
self
}
/// Get back all extensions.
pub fn drain_extensions(&mut self) -> impl Iterator<Item = Box<dyn Extension + Send>> + '_ {
self.extensions.drain(..)
}
/// Await an incoming client handshake request.
pub async fn receive_request(&mut self) -> Result<ClientRequest<'_>, Error> {
self.buffer.clear();
let mut skip = 0;
loop {
crate::read(&mut self.socket, &mut self.buffer, BLOCK_SIZE).await?;
let limit = std::cmp::min(self.buffer.len(), MAX_HEADERS_SIZE);
// We don't expect body, so can search for the CRLF headers tail from
// the end of the buffer.
if self.buffer[skip..limit].windows(4).rev().any(|w| w == b"\r\n\r\n") {
break;
}
// Give up if we've reached the limit. We could emit a specific error here,
// but httparse will produce meaningful error for us regardless.
if limit == MAX_HEADERS_SIZE {
break;
}
// Skip bytes that did not contain CRLF in the next iteration.
// If we only read a partial CRLF sequence, we would miss it if we skipped the full buffer
// length, hence backing off the full 4 bytes.
skip = self.buffer.len().saturating_sub(4);
}
self.decode_request()
}
/// Respond to the client.
pub async fn send_response(&mut self, r: &Response<'_>) -> Result<(), Error> {
self.buffer.clear();
self.encode_response(r);
self.socket.write_all(&self.buffer).await?;
self.socket.flush().await?;
self.buffer.clear();
Ok(())
}
/// Turn this handshake into a [`connection::Builder`].
pub fn into_builder(mut self) -> connection::Builder<T> {
let mut builder = connection::Builder::new(self.socket, Mode::Server);
builder.set_buffer(self.buffer);
builder.add_extensions(self.extensions.drain(..));
builder
}
/// Get out the inner socket of the server.
pub fn into_inner(self) -> T {
self.socket
}
// Decode client handshake request.
fn decode_request(&mut self) -> Result<ClientRequest, Error> {
let mut header_buf = [httparse::EMPTY_HEADER; MAX_NUM_HEADERS];
let mut request = httparse::Request::new(&mut header_buf);
match request.parse(self.buffer.as_ref()) {
Ok(httparse::Status::Complete(_)) => (),
Ok(httparse::Status::Partial) => return Err(Error::IncompleteHttpRequest),
Err(e) => return Err(Error::Http(Box::new(e))),
};
if request.method!= Some("GET") {
return Err(Error::InvalidRequestMethod);
}
if request.version!= Some(1) {
return Err(Error::UnsupportedHttpVersion);
}
let host = with_first_header(&request.headers, "Host", Ok)?;
expect_ascii_header(request.headers, "Upgrade", "websocket")?;
expect_ascii_header(request.headers, "Connection", "upgrade")?;
expect_ascii_header(request.headers, "Sec-WebSocket-Version", "13")?;
let origin =
request.headers.iter().find_map(
|h| {
if h.name.eq_ignore_ascii_case("Origin") {
Some(h.value)
} else {
None
}
},
);
let headers = RequestHeaders { host, origin };
let ws_key = with_first_header(&request.headers, "Sec-WebSocket-Key", |k| {
WebSocketKey::try_from(k).map_err(|_| Error::SecWebSocketKeyInvalidLength(k.len()))
})?;
for h in request.headers.iter().filter(|h| h.name.eq_ignore_ascii_case(SEC_WEBSOCKET_EXTENSIONS)) {
configure_extensions(&mut self.extensions, std::str::from_utf8(h.value)?)?
}
let mut protocols = Vec::new();
for p in request.headers.iter().filter(|h| h.name.eq_ignore_ascii_case(SEC_WEBSOCKET_PROTOCOL)) {
if let Some(&p) = self.protocols.iter().find(|x| x.as_bytes() == p.value) {
protocols.push(p)
}
}
let path = request.path.unwrap_or("/");
Ok(ClientRequest { ws_key, protocols, path, headers })
}
// Encode server handshake response.
fn encode_response(&mut self, response: &Response<'_>) {
match response {
Response::Accept { key, protocol } => {
let accept_value = super::generate_accept_key(&key);
self.buffer.extend_from_slice(
concat![
"HTTP/1.1 101 Switching Protocols",
"\r\nServer: soketto-",
env!("CARGO_PKG_VERSION"),
"\r\nUpgrade: websocket",
"\r\nConnection: upgrade",
"\r\nSec-WebSocket-Accept: ",
]
.as_bytes(),
);
self.buffer.extend_from_slice(&accept_value);
if let Some(p) = protocol {
self.buffer.extend_from_slice(b"\r\nSec-WebSocket-Protocol: ");
self.buffer.extend_from_slice(p.as_bytes())
}
append_extensions(self.extensions.iter().filter(|e| e.is_enabled()), &mut self.buffer);
self.buffer.extend_from_slice(b"\r\n\r\n")
}
Response::Reject { status_code } => {
self.buffer.extend_from_slice(b"HTTP/1.1 ");
let (_, reason) = if let Ok(i) = STATUSCODES.binary_search_by_key(status_code, |(n, _)| *n) {
STATUSCODES[i]
} else {
(500, "500 Internal Server Error")
};
self.buffer.extend_from_slice(reason.as_bytes());
self.buffer.extend_from_slice(b"\r\n\r\n")
}
}
}
}
/// Handshake request received from the client.
#[derive(Debug)]
pub struct ClientRequest<'a> {
ws_key: WebSocketKey,
protocols: Vec<&'a str>,
path: &'a str,
headers: RequestHeaders<'a>,
}
/// Select HTTP headers sent by the client.
#[derive(Debug, Copy, Clone)]
pub struct RequestHeaders<'a> {
/// The [`Host`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Host) header.
pub host: &'a [u8],
/// The [`Origin`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin) header, if provided.
pub origin: Option<&'a [u8]>,
}
impl<'a> ClientRequest<'a> {
/// The `Sec-WebSocket-Key` header nonce value.
pub fn key(&self) -> WebSocketKey {
self.ws_key
}
/// The protocols the client is proposing.
pub fn protocols(&self) -> impl Iterator<Item = &str> {
self.protocols.iter().cloned()
}
/// The path the client is requesting.
pub fn path(&self) -> &str {
self.path
}
/// Select HTTP headers sent by the client.
pub fn headers(&self) -> RequestHeaders {
self.headers
}
}
/// Handshake response the server sends back to the client.
#[derive(Debug)]
pub enum Response<'a> {
/// The server accepts the handshake request.
Accept { key: WebSocketKey, protocol: Option<&'a str> },
/// The server rejects the handshake request.
Reject { status_code: u16 },
}
/// Known status codes and their reason phrases.
const STATUSCODES: &[(u16, &str)] = &[
(100, "100 Continue"),
(101, "101 Switching Protocols"),
(102, "102 Processing"),
(200, "200 OK"),
(201, "201 Created"),
(202, "202 Accepted"),
(203, "203 Non Authoritative Information"),
(204, "204 No Content"),
(205, "205 Reset Content"),
(206, "206 Partial Content"),
(207, "207 Multi-Status"),
(208, "208 Already Reported"),
(226, "226 IM Used"),
(300, "300 Multiple Choices"),
(301, "301 Moved Permanently"),
(302, "302 Found"),
(303, "303 See Other"),
(304, "304 Not Modified"),
(305, "305 Use Proxy"),
(307, "307 Temporary Redirect"),
(308, "308 Permanent Redirect"),
(400, "400 Bad Request"),
(401, "401 Unauthorized"),
(402, "402 Payment Required"),
(403, "403 Forbidden"),
(404, "404 Not Found"),
(405, "405 Method Not Allowed"),
(406, "406 Not Acceptable"),
(407, "407 Proxy Authentication Required"),
(408, "408 Request Timeout"),
(409, "409 Conflict"),
(410, "410 Gone"),
(411, "411 Length Required"),
(412, "412 Precondition Failed"),
(413, "413 Payload Too Large"),
(414, "414 URI Too Long"),
(415, "415 Unsupported Media Type"),
(416, "416 Range Not Satisfiable"),
(417, "417 Expectation Failed"),
(418, "418 I'm a teapot"),
(421, "421 Misdirected Request"),
(422, "422 Unprocessable Entity"),
(423, "423 Locked"),
(424, "424 Failed Dependency"),
(426, "426 Upgrade Required"),
(428, "428 Precondition Required"),
(429, "429 Too Many Requests"),
(431, "431 Request Header Fields Too Large"),
(451, "451 Unavailable For Legal Reasons"),
(500, "500 Internal Server Error"),
(501, "501 Not Implemented"),
(502, "502 Bad Gateway"),
(503, "503 Service Unavailable"),
(504, "504 Gateway Timeout"),
(505, "505 HTTP Version Not Supported"),
(506, "506 Variant Also Negotiates"),
(507, "507 Insufficient Storage"),
(508, "508 Loop Detected"),
(510, "510 Not Extended"),
(511, "511 Network Authentication Required"),
];
|
Server
|
identifier_name
|
vga_buffer.rs
|
use core::fmt;
use volatile::Volatile;
use spin::Mutex;
#[allow(dead_code)] // prevents compiler warnings that some enumerations are never used
#[derive(Debug, Clone, Copy, PartialEq, Eq)] // enables copy semantics for the type: makes printable & comparable
#[repr(u8)] // makes each enum variant be stored as a u8
pub enum Color {
Black = 0,
Blue = 1,
Green = 2,
Cyan = 3,
Red = 4,
Magenta = 5,
Brown = 6,
LightGray = 7,
DarkGray = 8,
LightBlue = 9,
|
LightRed = 12,
Pink = 13,
Yellow = 14,
White = 15,
}
// used to represent a full VGA color code (foreground & background)
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ColorCode(u8); // creates a type which is essentially an alias for a single byte
impl ColorCode {
// creates a single byte detailing the fore and background colors (based on VGA specifications)
fn new(foreground: Color, background: Color) -> ColorCode {
ColorCode((background as u8) << 4 | (foreground as u8))
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
// ensures struct's field laid out exactly like a C struct since VGA depends on the order of the two bytes
#[repr(C)]
struct ScreenChar {
ascii_character: u8, // VGA byte representing ascii char
color_code: ColorCode, // VGA byte representing char's color
}
// VGA typical buffer sizes
const BUFFER_HEIGHT: usize = 25; // number of lines
const BUFFER_WIDTH: usize = 80; // number of chars in line
struct Buffer {
// Volatile crate keeps rust compiler from optimizing and removing writes
// since writes are never read and are going to the VGA buffer memory (a side-effect)
// and not just writing to RAM
chars: [[Volatile<ScreenChar>; BUFFER_WIDTH]; BUFFER_HEIGHT],
}
// To actually write to screen: always writes to last line & shift lines up when a line is full (or on \n)
pub struct Writer {
column_position: usize, // keeps track of current position in last row
color_code: ColorCode, // current fore & background colors
buffer: &'static mut Buffer, // reference to VGA buffer:'static lifetime specifies reference is valid for whole program run time (VGA buffer)
}
impl Writer {
// writes a single byte to the screen at current location
pub fn write_byte(&mut self, byte: u8) {
match byte {
b'\n' => self.new_line(),
byte => {
if self.column_position >= BUFFER_WIDTH {
self.new_line();
}
let row = BUFFER_HEIGHT - 1;
let col = self.column_position;
let color_code = self.color_code;
self.buffer.chars[row][col].write(ScreenChar {
ascii_character: byte,
color_code: color_code,
});
self.column_position += 1;
}
}
}
// accepts a string to be written only writing valid ascii chars
pub fn write_string(&mut self, s: &str) {
for byte in s.bytes() {
match byte {
// printable ASCII byte or newline
0x20...0x7e | b'\n' => self.write_byte(byte),
// not part of printable ASCII range
_ => self.write_byte(0xfe),
}
}
}
fn new_line(&mut self) {
// range notation is exclusive of upper end.
// top line of screen is 0 and is shifted off screen
for row in 1..BUFFER_HEIGHT {
for col in 0..BUFFER_WIDTH {
let char = self.buffer.chars[row][col].read();
self.buffer.chars[row-1][col].write(char);
}
}
// clears last line of output for new input, otherwise if string being written
// is not long enough all previous characters will not be overwritten
self.clear_row(BUFFER_HEIGHT - 1);
self.column_position = 0;
}
// clears row by overwriting characters with spaces
fn clear_row(&mut self, row: usize) {
let blank = ScreenChar {
ascii_character: b' ',
color_code: self.color_code,
};
for col in 0..BUFFER_WIDTH {
self.buffer.chars[row][col].write(blank);
}
}
}
// Provides support for Rust's formatting macros allowing easy printing
// of different types like integers or floats.
// Results in: Write! / Writeln! macro support
impl fmt::Write for Writer {
// The only required method of the fmt::Write trait
fn write_str(&mut self, s: &str) -> fmt::Result {
self.write_string(s);
Ok(())
}
}
// Provides a static Writer object which utilizes non-const functions
// Requires locking to provide interior mutability: since it utilizes &mut self for writing
// it requires mutability, but its mutibility is not provided to users, therefore it is interior
// mutability. The Mutex allows safe usage internally.
lazy_static! {
pub static ref WRITER: Mutex<Writer> = Mutex::new(Writer {
column_position: 0,
color_code: ColorCode::new(Color::Yellow, Color::Black),
// provides a direct mutable reference to the VGA memory-mapped I/O address
// allowing reading and writing. We deem this safe as this address always corresponds to
// VGA, and therefore it is acceptable and required to wrap in an unsafe block
buffer: unsafe { &mut *(0xb8000 as *mut Buffer) },
});
}
// Defines the print! macro
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ($crate::vga_buffer::print(format_args!($($arg)*)));
}
// Defines the println! macro
#[macro_export]
macro_rules! println {
() => (print!("\n"));
($fmt:expr) => (print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
pub fn print(args: fmt::Arguments) {
use core::fmt::Write; // imports write_fmt method from the Write trait
WRITER.lock().write_fmt(args).unwrap();
}
#[cfg(test)]
mod test {
use super::*; // import all items of parent module: vga_buffer
// Specifies what char represents an empty cell in VGA buffer during testing
fn empty_char() -> ScreenChar {
ScreenChar {
ascii_character: b' ',
color_code: ColorCode::new(Color::Green, Color::Brown),
}
}
fn construct_buffer() -> Buffer {
// bypasses array construction requiring that contained type is Copy
// ScreenChar satisfies this, but the Volatile wrapper does not
use array_init::array_init;
Buffer {
// Provides array initialization without non-Copy types.
// parameter of array_init is a closure. The single parameter to the closure is unused and therefore unimportant
// otherwise it could be used to perform calculations on value before creating the array.
// array_init utilizes type's size to create the required number of indices. In this case
// the number of columns and rows are defined in the Buffer struct
// "The width & height are deduced by type inference"
chars: array_init(|_| array_init(|_| Volatile::new(empty_char()))),
}
}
fn construct_writer() -> Writer {
use std::boxed::Box;
let buffer = construct_buffer();
Writer {
column_position: 0,
color_code: ColorCode::new(Color::Blue, Color::Magenta),
// transforms the created buffer into a &'static mut to satisfy buffer property's type
buffer: Box::leak(Box::new(buffer)),
}
}
#[test] // tells test framework this is a test function
fn write_byte() {
let mut writer = construct_writer();
writer.write_byte(b'X');
writer.write_byte(b'Y');
for (i, row) in writer.buffer.chars.iter().enumerate() {
for (j, screen_char) in row.iter().enumerate() {
let screen_char = screen_char.read();
if i == BUFFER_HEIGHT - 1 && j == 0 {
assert_eq!(screen_char.ascii_character, b'X');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i == BUFFER_HEIGHT - 1 && j == 1 {
assert_eq!(screen_char.ascii_character, b'Y');
assert_eq!(screen_char.color_code, writer.color_code);
} else {
assert_eq!(screen_char, empty_char());
}
}
}
}
#[test]
fn write_formatted() {
use core::fmt::Write;
let mut writer = construct_writer();
writeln!(&mut writer, "a").unwrap();
writeln!(&mut writer, "b{}", "c").unwrap();
for (i, row) in writer.buffer.chars.iter().enumerate() {
for (j, screen_char) in row.iter().enumerate() {
let screen_char = screen_char.read();
if i == BUFFER_HEIGHT - 3 && j == 0 {
assert_eq!(screen_char.ascii_character, b'a');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i == BUFFER_HEIGHT - 2 && j == 0 {
assert_eq!(screen_char.ascii_character, b'b');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i == BUFFER_HEIGHT - 2 && j == 1 {
assert_eq!(screen_char.ascii_character, b'c');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i >= BUFFER_HEIGHT - 2 { // ensures empty lines are shifted in on a new line and have correct color code
assert_eq!(screen_char.ascii_character, b' ');
assert_eq!(screen_char.color_code, writer.color_code);
} else {
assert_eq!(screen_char, empty_char());
}
}
}
}
}
|
LightGreen = 10,
LightCyan = 11,
|
random_line_split
|
vga_buffer.rs
|
use core::fmt;
use volatile::Volatile;
use spin::Mutex;
#[allow(dead_code)] // prevents compiler warnings that some enumerations are never used
#[derive(Debug, Clone, Copy, PartialEq, Eq)] // enables copy semantics for the type: makes printable & comparable
#[repr(u8)] // makes each enum variant be stored as a u8
pub enum Color {
Black = 0,
Blue = 1,
Green = 2,
Cyan = 3,
Red = 4,
Magenta = 5,
Brown = 6,
LightGray = 7,
DarkGray = 8,
LightBlue = 9,
LightGreen = 10,
LightCyan = 11,
LightRed = 12,
Pink = 13,
Yellow = 14,
White = 15,
}
// used to represent a full VGA color code (foreground & background)
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ColorCode(u8); // creates a type which is essentially an alias for a single byte
impl ColorCode {
// creates a single byte detailing the fore and background colors (based on VGA specifications)
fn new(foreground: Color, background: Color) -> ColorCode {
ColorCode((background as u8) << 4 | (foreground as u8))
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
// ensures struct's field laid out exactly like a C struct since VGA depends on the order of the two bytes
#[repr(C)]
struct ScreenChar {
ascii_character: u8, // VGA byte representing ascii char
color_code: ColorCode, // VGA byte representing char's color
}
// VGA typical buffer sizes
const BUFFER_HEIGHT: usize = 25; // number of lines
const BUFFER_WIDTH: usize = 80; // number of chars in line
struct Buffer {
// Volatile crate keeps rust compiler from optimizing and removing writes
// since writes are never read and are going to the VGA buffer memory (a side-effect)
// and not just writing to RAM
chars: [[Volatile<ScreenChar>; BUFFER_WIDTH]; BUFFER_HEIGHT],
}
// To actually write to screen: always writes to last line & shift lines up when a line is full (or on \n)
pub struct Writer {
column_position: usize, // keeps track of current position in last row
color_code: ColorCode, // current fore & background colors
buffer: &'static mut Buffer, // reference to VGA buffer:'static lifetime specifies reference is valid for whole program run time (VGA buffer)
}
impl Writer {
    /// Writes a single byte at the current cursor position on the bottom row.
    /// A `\n` triggers a line shift; any other byte is placed at the current
    /// column (wrapping to a new line first when the row is full).
    pub fn write_byte(&mut self, byte: u8) {
        match byte {
            b'\n' => self.new_line(),
            byte => {
                if self.column_position >= BUFFER_WIDTH {
                    self.new_line();
                }
                // output always happens on the last visible row
                let row = BUFFER_HEIGHT - 1;
                let col = self.column_position;
                let color_code = self.color_code;
                self.buffer.chars[row][col].write(ScreenChar {
                    ascii_character: byte,
                    // field-init shorthand (was the redundant `color_code: color_code`)
                    color_code,
                });
                self.column_position += 1;
            }
        }
    }
    /// Accepts a string to be written, substituting 0xfe (the ■ block glyph)
    /// for any byte outside the printable-ASCII range.
    pub fn write_string(&mut self, s: &str) {
        for byte in s.bytes() {
            match byte {
                // printable ASCII byte or newline.
                // NOTE: `..=` replaces the original `...` range-pattern syntax,
                // which is deprecated and a hard error in Rust edition 2021.
                0x20..=0x7e | b'\n' => self.write_byte(byte),
                // not part of printable ASCII range
                _ => self.write_byte(0xfe),
            }
        }
    }
    /// Scrolls every row up by one (row 0 falls off screen), clears the
    /// bottom row, and resets the cursor to column 0.
    fn new_line(&mut self) {
        // range notation is exclusive of upper end.
        // top line of screen is 0 and is shifted off screen
        for row in 1..BUFFER_HEIGHT {
            for col in 0..BUFFER_WIDTH {
                // renamed from `char`, which confusingly shadowed the primitive type name
                let character = self.buffer.chars[row][col].read();
                self.buffer.chars[row - 1][col].write(character);
            }
        }
        // clears last line of output for new input, otherwise if string being written
        // is not long enough all previous characters will not be overwritten
        self.clear_row(BUFFER_HEIGHT - 1);
        self.column_position = 0;
    }
    /// Clears `row` by overwriting every cell with a space in the current color.
    fn clear_row(&mut self, row: usize) {
        let blank = ScreenChar {
            ascii_character: b' ',
            color_code: self.color_code,
        };
        for col in 0..BUFFER_WIDTH {
            self.buffer.chars[row][col].write(blank);
        }
    }
}
// Provides support for Rust's formatting macros allowing easy printing
// of different types like integers or floats.
// Results in: Write! / Writeln! macro support
impl fmt::Write for Writer {
    /// Sole required method of the `fmt::Write` trait: delegates to
    /// `write_string` and reports success unconditionally, which is what
    /// enables `write!` / `writeln!` on a `Writer`.
    fn write_str(&mut self, text: &str) -> fmt::Result {
        self.write_string(text);
        Ok(())
    }
}
// Provides a static Writer object which utilizes non-const functions
// Requires locking to provide interior mutability: since it utilizes &mut self for writing
// it requires mutability, but its mutibility is not provided to users, therefore it is interior
// mutability. The Mutex allows safe usage internally.
// NOTE: lazy_static defers initialization to the first access, which is what
// permits the non-const ColorCode::new call and the raw-pointer cast below.
lazy_static! {
    pub static ref WRITER: Mutex<Writer> = Mutex::new(Writer {
        column_position: 0,
        color_code: ColorCode::new(Color::Yellow, Color::Black),
        // provides a direct mutable reference to the VGA memory-mapped I/O address
        // allowing reading and writing. We deem this safe as this address always corresponds to
        // VGA, and therefore it is acceptable and required to wrap in an unsafe block
        buffer: unsafe { &mut *(0xb8000 as *mut Buffer) },
    });
}
// Defines the print! macro: forwards the format arguments to the crate-level
// vga_buffer::print function below
#[macro_export]
macro_rules! print {
    ($($arg:tt)*) => ($crate::vga_buffer::print(format_args!($($arg)*)));
}
// Defines the println! macro: appends "\n" at compile time via concat! and
// delegates to print!
#[macro_export]
macro_rules! println {
    () => (print!("\n"));
    ($fmt:expr) => (print!(concat!($fmt, "\n")));
    ($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
pub fn print(args: fmt::Arguments) {
    // `write_fmt` comes from the `fmt::Write` trait implemented on Writer
    use core::fmt::Write;
    let mut writer = WRITER.lock();
    writer.write_fmt(args).unwrap();
}
#[cfg(test)]
mod test {
    use super::*; // import all items of parent module: vga_buffer
    // Specifies what char represents an empty cell in VGA buffer during testing.
    // Uses a color distinct from construct_writer's so untouched cells can be
    // told apart from blanks written by the Writer.
    fn empty_char() -> ScreenChar {
        ScreenChar {
            ascii_character: b' ',
            color_code: ColorCode::new(Color::Green, Color::Brown),
        }
    }
    // Builds a heap-backed Buffer filled with empty_char cells (no VGA memory involved)
    fn construct_buffer() -> Buffer {
        // bypasses array construction requiring that contained type is Copy
        // ScreenChar satisfies this, but the Volatile wrapper does not
        use array_init::array_init;
        Buffer {
            // Provides array initialization without non-Copy types.
            // parameter of array_init is a closure. The single parameter to the closure is unused and therefore unimportant
            // otherwise it could be used to perform calculations on value before creating the array.
            // array_init utilizes type's size to create the required number of indices. In this case
            // the number of columns and rows are defined in the Buffer struct
            // "The width & height are deduced by type inference"
            chars: array_init(|_| array_init(|_| Volatile::new(empty_char()))),
        }
    }
    // Builds a Writer over a leaked heap Buffer so tests never touch real VGA memory
    fn construct_writer() -> Writer {
        use std::boxed::Box;
        let buffer = construct_buffer();
        Writer {
            column_position: 0,
            color_code: ColorCode::new(Color::Blue, Color::Magenta),
            // transforms the created buffer into a &'static mut to satisfy buffer property's type
            buffer: Box::leak(Box::new(buffer)),
        }
    }
    #[test] // tells test framework this is a test function
    fn write_byte() {
        let mut writer = construct_writer();
        writer.write_byte(b'X');
        writer.write_byte(b'Y');
        // every cell except the two just written must still hold the empty_char fill
        for (i, row) in writer.buffer.chars.iter().enumerate() {
            for (j, screen_char) in row.iter().enumerate() {
                let screen_char = screen_char.read();
                if i == BUFFER_HEIGHT - 1 && j == 0 {
                    assert_eq!(screen_char.ascii_character, b'X');
                    assert_eq!(screen_char.color_code, writer.color_code);
                } else if i == BUFFER_HEIGHT - 1 && j == 1 {
                    assert_eq!(screen_char.ascii_character, b'Y');
                    assert_eq!(screen_char.color_code, writer.color_code);
                } else {
                    assert_eq!(screen_char, empty_char());
                }
            }
        }
    }
#[test]
fn write_formatted() {
use core::fmt::Write;
let mut writer = construct_writer();
writeln!(&mut writer, "a").unwrap();
writeln!(&mut writer, "b{}", "c").unwrap();
for (i, row) in writer.buffer.chars.iter().enumerate() {
for (j, screen_char) in row.iter().enumerate() {
let screen_char = screen_char.read();
if i == BUFFER_HEIGHT - 3 && j == 0 {
assert_eq!(screen_char.ascii_character, b'a');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i == BUFFER_HEIGHT - 2 && j == 0 {
assert_eq!(screen_char.ascii_character, b'b');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i == BUFFER_HEIGHT - 2 && j == 1 {
assert_eq!(screen_char.ascii_character, b'c');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i >= BUFFER_HEIGHT - 2
|
else {
assert_eq!(screen_char, empty_char());
}
}
}
}
}
|
{ // ensures empty lines are shifted in on a new line and have correct color code
assert_eq!(screen_char.ascii_character, b' ');
assert_eq!(screen_char.color_code, writer.color_code);
}
|
conditional_block
|
vga_buffer.rs
|
use core::fmt;
use volatile::Volatile;
use spin::Mutex;
#[allow(dead_code)] // prevents compiler warnings that some enumerations are never used
#[derive(Debug, Clone, Copy, PartialEq, Eq)] // enables copy semantics for the type: makes printable & comparable
#[repr(u8)] // makes each enum variant be stored as a u8
/// The 16 standard VGA text-mode colors; the discriminant is the 4-bit
/// color number packed into the attribute byte by `ColorCode::new`.
/// (The enum's name was lost to a FIM extraction hole; restored from the
/// dataset's `identifier_name` middle token and the `Color::Yellow` usages.)
pub enum Color {
    Black = 0,
    Blue = 1,
    Green = 2,
    Cyan = 3,
    Red = 4,
    Magenta = 5,
    Brown = 6,
    LightGray = 7,
    DarkGray = 8,
    LightBlue = 9,
    LightGreen = 10,
    LightCyan = 11,
    LightRed = 12,
    Pink = 13,
    Yellow = 14,
    White = 15,
}
// used to represent a full VGA color code (foreground & background)
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ColorCode(u8); // creates a type which is essentially an alias for a single byte
impl ColorCode {
    /// Packs a foreground and a background color into one VGA attribute byte:
    /// the background occupies the high nibble, the foreground the low nibble.
    fn new(foreground: Color, background: Color) -> ColorCode {
        let bg = background as u8;
        let fg = foreground as u8;
        ColorCode((bg << 4) | fg)
    }
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
// ensures struct's field laid out exactly like a C struct since VGA depends on the order of the two bytes
#[repr(C)]
// One cell of the VGA text buffer: the character byte followed by its color/attribute byte.
struct ScreenChar {
    ascii_character: u8, // VGA byte representing ascii char
    color_code: ColorCode, // VGA byte representing char's color
}
// VGA typical buffer sizes
const BUFFER_HEIGHT: usize = 25; // number of lines
const BUFFER_WIDTH: usize = 80; // number of chars in line
// Matches the layout of the memory-mapped text buffer at 0xb8000:
// BUFFER_HEIGHT rows of BUFFER_WIDTH cells.
struct Buffer {
    // Volatile crate keeps rust compiler from optimizing and removing writes
    // since writes are never read and are going to the VGA buffer memory (a side-effect)
    // and not just writing to RAM
    chars: [[Volatile<ScreenChar>; BUFFER_WIDTH]; BUFFER_HEIGHT],
}
// To actually write to screen: always writes to last line & shift lines up when a line is full (or on \n)
pub struct Writer {
    column_position: usize, // keeps track of current position in last row (0..BUFFER_WIDTH)
    color_code: ColorCode, // current fore & background colors
    buffer: &'static mut Buffer, // reference to VGA buffer:'static lifetime specifies reference is valid for whole program run time (VGA buffer)
}
impl Writer {
    /// Writes a single byte at the current cursor position on the bottom row.
    /// A `\n` triggers a line shift; any other byte is placed at the current
    /// column (wrapping to a new line first when the row is full).
    pub fn write_byte(&mut self, byte: u8) {
        match byte {
            b'\n' => self.new_line(),
            byte => {
                if self.column_position >= BUFFER_WIDTH {
                    self.new_line();
                }
                // output always happens on the last visible row
                let row = BUFFER_HEIGHT - 1;
                let col = self.column_position;
                let color_code = self.color_code;
                self.buffer.chars[row][col].write(ScreenChar {
                    ascii_character: byte,
                    // field-init shorthand (was the redundant `color_code: color_code`)
                    color_code,
                });
                self.column_position += 1;
            }
        }
    }
    /// Accepts a string to be written, substituting 0xfe (the ■ block glyph)
    /// for any byte outside the printable-ASCII range.
    pub fn write_string(&mut self, s: &str) {
        for byte in s.bytes() {
            match byte {
                // printable ASCII byte or newline.
                // NOTE: `..=` replaces the original `...` range-pattern syntax,
                // which is deprecated and a hard error in Rust edition 2021.
                0x20..=0x7e | b'\n' => self.write_byte(byte),
                // not part of printable ASCII range
                _ => self.write_byte(0xfe),
            }
        }
    }
    /// Scrolls every row up by one (row 0 falls off screen), clears the
    /// bottom row, and resets the cursor to column 0.
    fn new_line(&mut self) {
        // range notation is exclusive of upper end.
        // top line of screen is 0 and is shifted off screen
        for row in 1..BUFFER_HEIGHT {
            for col in 0..BUFFER_WIDTH {
                // renamed from `char`, which confusingly shadowed the primitive type name
                let character = self.buffer.chars[row][col].read();
                self.buffer.chars[row - 1][col].write(character);
            }
        }
        // clears last line of output for new input, otherwise if string being written
        // is not long enough all previous characters will not be overwritten
        self.clear_row(BUFFER_HEIGHT - 1);
        self.column_position = 0;
    }
    /// Clears `row` by overwriting every cell with a space in the current color.
    fn clear_row(&mut self, row: usize) {
        let blank = ScreenChar {
            ascii_character: b' ',
            color_code: self.color_code,
        };
        for col in 0..BUFFER_WIDTH {
            self.buffer.chars[row][col].write(blank);
        }
    }
}
// Provides support for Rust's formatting macros allowing easy printing
// of different types like integers or floats.
// Results in: Write! / Writeln! macro support
impl fmt::Write for Writer {
    /// Sole required method of the `fmt::Write` trait: delegates to
    /// `write_string` and reports success unconditionally, which is what
    /// enables `write!` / `writeln!` on a `Writer`.
    fn write_str(&mut self, text: &str) -> fmt::Result {
        self.write_string(text);
        Ok(())
    }
}
// Provides a static Writer object which utilizes non-const functions
// Requires locking to provide interior mutability: since it utilizes &mut self for writing
// it requires mutability, but its mutibility is not provided to users, therefore it is interior
// mutability. The Mutex allows safe usage internally.
// NOTE: lazy_static defers initialization to the first access, which is what
// permits the non-const ColorCode::new call and the raw-pointer cast below.
lazy_static! {
    pub static ref WRITER: Mutex<Writer> = Mutex::new(Writer {
        column_position: 0,
        color_code: ColorCode::new(Color::Yellow, Color::Black),
        // provides a direct mutable reference to the VGA memory-mapped I/O address
        // allowing reading and writing. We deem this safe as this address always corresponds to
        // VGA, and therefore it is acceptable and required to wrap in an unsafe block
        buffer: unsafe { &mut *(0xb8000 as *mut Buffer) },
    });
}
// Defines the print! macro: forwards the format arguments to the crate-level
// vga_buffer::print function below
#[macro_export]
macro_rules! print {
    ($($arg:tt)*) => ($crate::vga_buffer::print(format_args!($($arg)*)));
}
// Defines the println! macro: appends "\n" at compile time via concat! and
// delegates to print!
#[macro_export]
macro_rules! println {
    () => (print!("\n"));
    ($fmt:expr) => (print!(concat!($fmt, "\n")));
    ($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
pub fn print(args: fmt::Arguments) {
    // `write_fmt` comes from the `fmt::Write` trait implemented on Writer
    use core::fmt::Write;
    let mut writer = WRITER.lock();
    writer.write_fmt(args).unwrap();
}
#[cfg(test)]
mod test {
    use super::*; // import all items of parent module: vga_buffer
    // Specifies what char represents an empty cell in VGA buffer during testing.
    // Uses a color distinct from construct_writer's so untouched cells can be
    // told apart from blanks written by the Writer.
    fn empty_char() -> ScreenChar {
        ScreenChar {
            ascii_character: b' ',
            color_code: ColorCode::new(Color::Green, Color::Brown),
        }
    }
    // Builds a heap-backed Buffer filled with empty_char cells (no VGA memory involved)
    fn construct_buffer() -> Buffer {
        // bypasses array construction requiring that contained type is Copy
        // ScreenChar satisfies this, but the Volatile wrapper does not
        use array_init::array_init;
        Buffer {
            // Provides array initialization without non-Copy types.
            // parameter of array_init is a closure. The single parameter to the closure is unused and therefore unimportant
            // otherwise it could be used to perform calculations on value before creating the array.
            // array_init utilizes type's size to create the required number of indices. In this case
            // the number of columns and rows are defined in the Buffer struct
            // "The width & height are deduced by type inference"
            chars: array_init(|_| array_init(|_| Volatile::new(empty_char()))),
        }
    }
    // Builds a Writer over a leaked heap Buffer so tests never touch real VGA memory
    fn construct_writer() -> Writer {
        use std::boxed::Box;
        let buffer = construct_buffer();
        Writer {
            column_position: 0,
            color_code: ColorCode::new(Color::Blue, Color::Magenta),
            // transforms the created buffer into a &'static mut to satisfy buffer property's type
            buffer: Box::leak(Box::new(buffer)),
        }
    }
    #[test] // tells test framework this is a test function
    fn write_byte() {
        let mut writer = construct_writer();
        writer.write_byte(b'X');
        writer.write_byte(b'Y');
        // every cell except the two just written must still hold the empty_char fill
        for (i, row) in writer.buffer.chars.iter().enumerate() {
            for (j, screen_char) in row.iter().enumerate() {
                let screen_char = screen_char.read();
                if i == BUFFER_HEIGHT - 1 && j == 0 {
                    assert_eq!(screen_char.ascii_character, b'X');
                    assert_eq!(screen_char.color_code, writer.color_code);
                } else if i == BUFFER_HEIGHT - 1 && j == 1 {
                    assert_eq!(screen_char.ascii_character, b'Y');
                    assert_eq!(screen_char.color_code, writer.color_code);
                } else {
                    assert_eq!(screen_char, empty_char());
                }
            }
        }
    }
    #[test]
    fn write_formatted() {
        use core::fmt::Write;
        let mut writer = construct_writer();
        // each writeln! ends in '\n', scrolling once: afterwards "a" sits on
        // row HEIGHT-3 and "bc" on row HEIGHT-2 (per the assertions below)
        writeln!(&mut writer, "a").unwrap();
        writeln!(&mut writer, "b{}", "c").unwrap();
        for (i, row) in writer.buffer.chars.iter().enumerate() {
            for (j, screen_char) in row.iter().enumerate() {
                let screen_char = screen_char.read();
                if i == BUFFER_HEIGHT - 3 && j == 0 {
                    assert_eq!(screen_char.ascii_character, b'a');
                    assert_eq!(screen_char.color_code, writer.color_code);
                } else if i == BUFFER_HEIGHT - 2 && j == 0 {
                    assert_eq!(screen_char.ascii_character, b'b');
                    assert_eq!(screen_char.color_code, writer.color_code);
                } else if i == BUFFER_HEIGHT - 2 && j == 1 {
                    assert_eq!(screen_char.ascii_character, b'c');
                    assert_eq!(screen_char.color_code, writer.color_code);
                } else if i >= BUFFER_HEIGHT - 2 { // ensures empty lines are shifted in on a new line and have correct color code
                    assert_eq!(screen_char.ascii_character, b' ');
                    assert_eq!(screen_char.color_code, writer.color_code);
                } else {
                    assert_eq!(screen_char, empty_char());
                }
            }
        }
    }
}
|
Color
|
identifier_name
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.