hexsha (stringlengths 40-40) | size (int64 4-1.05M) | content (stringlengths 4-1.05M) | avg_line_length (float64 1.33-100) | max_line_length (int64 1-1k) | alphanum_fraction (float64 0.25-1)
---|---|---|---|---|---
1c1b1b58402e830393a9f2e57a43918d414f6f85 | 1,479 | // FIXME: Ideally these suggestions would be fixed via rustfix. Blocked by rust-lang/rust#53934
// // run-rustfix
#![allow(clippy::write_literal)]
#![warn(clippy::write_with_newline)]
use std::io::Write;
fn main() {
let mut v = Vec::new();
// These should fail
write!(&mut v, "Hello\n");
write!(&mut v, "Hello {}\n", "world");
write!(&mut v, "Hello {} {}\n", "world", "#2");
write!(&mut v, "{}\n", 1265);
write!(&mut v, "\n");
// These should be fine
write!(&mut v, "");
write!(&mut v, "Hello");
writeln!(&mut v, "Hello");
writeln!(&mut v, "Hello\n");
writeln!(&mut v, "Hello {}\n", "world");
write!(&mut v, "Issue\n{}", 1265);
write!(&mut v, "{}", 1265);
write!(&mut v, "\n{}", 1275);
write!(&mut v, "\n\n");
write!(&mut v, "like eof\n\n");
write!(&mut v, "Hello {} {}\n\n", "world", "#2");
writeln!(&mut v, "\ndon't\nwarn\nfor\nmultiple\nnewlines\n"); // #3126
writeln!(&mut v, "\nbla\n\n"); // #3126
// Escaping
write!(&mut v, "\\n"); // #3514
write!(&mut v, "\\\n"); // should fail
write!(&mut v, "\\\\n");
// Raw strings
write!(&mut v, r"\n"); // #3778
// Literal newlines should also fail
write!(
&mut v,
"
"
);
write!(
&mut v,
r"
"
);
// Don't warn on CRLF (#4208)
write!(&mut v, "\r\n");
write!(&mut v, "foo\r\n");
write!(&mut v, "\\r\n"); //~ ERROR
write!(&mut v, "foo\rbar\n");
}
| 24.65 | 95 | 0.491548 |
e4dfd62cb36ab167c7d3365f71db4a48a47c9e92 | 14,161 | use crate::*;
use host::{WaterboxHost};
use std::{os::raw::c_char, io, ffi::{/*CString, */CStr}};
use context::ExternalCallback;
/// The memory template for a WaterboxHost. Don't worry about
/// making every size as small as possible, since the savestater handles sparse regions
/// well enough. All values should be PAGESIZE aligned.
#[repr(C)]
pub struct MemoryLayoutTemplate {
/// Memory space to serve brk(2)
pub sbrk_size: usize,
/// Memory space to serve alloc_sealed(3)
pub sealed_size: usize,
/// Memory space to serve alloc_invisible(3)
pub invis_size: usize,
/// Memory space to serve alloc_plain(3)
pub plain_size: usize,
/// Memory space to serve mmap(2) and friends.
/// Calls without MAP_FIXED or MREMAP_FIXED will be placed in this area.
/// TODO: Are we allowing fixed calls to happen anywhere in the block?
pub mmap_size: usize,
}
impl MemoryLayoutTemplate {
/// checks a memory layout for validity
pub fn make_layout(&self, elf_addr: AddressRange) -> anyhow::Result<WbxSysLayout> {
let mut res = unsafe { std::mem::zeroed::<WbxSysLayout>() };
res.elf = elf_addr.align_expand();
let mut end = res.elf.end();
let mut add_area = |size| {
let a = AddressRange {
start: end,
size: align_up(size)
};
end = a.end();
a
};
res.main_thread = add_area(1 << 20);
res.sbrk = add_area(self.sbrk_size);
res.sealed = add_area(self.sealed_size);
res.invis = add_area(self.invis_size);
res.plain = add_area(self.plain_size);
res.mmap = add_area(self.mmap_size);
if res.all().start >> 32 != (res.all().end() - 1) >> 32 {
Err(anyhow!("HostMemoryLayout must fit into a single 4GiB region!"))
} else {
Ok(res)
}
}
}
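// A minimal, self-contained sketch of the bump-style area allocation performed by
// `make_layout` above: each area starts where the previous one ended, with sizes
// rounded up to page granularity. `PAGESIZE`, `align_up`, and the start address here
// are illustrative stand-ins for the crate's real definitions.
#[cfg(test)]
mod layout_sketch {
    const PAGESIZE: usize = 0x1000;

    // stand-in for the crate's `align_up`
    fn align_up(v: usize) -> usize {
        (v + PAGESIZE - 1) & !(PAGESIZE - 1)
    }

    #[test]
    fn areas_are_contiguous_and_page_aligned() {
        let mut end = 0x1000_0000usize;
        let mut add_area = |size: usize| {
            let start = end;
            end = start + align_up(size);
            start..end
        };
        let main_thread = add_area(1 << 20);
        let sbrk = add_area(0x1234); // rounded up to the next page boundary
        assert_eq!(main_thread.end, sbrk.start);
        assert_eq!(sbrk.end - sbrk.start, 0x2000);
    }
}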
/// "return" struct. On successful funtion call, error_message[0] will be 0 and data will be the return value.
/// On failed call, error_message will contain a string describing the error, and data will be unspecified.
/// Any function that takes this object as an argument can fail and should be checked for failure, even if
/// it does not return data.
#[repr(C)]
pub struct Return<T> {
pub error_message: [u8; 1024],
pub data: T,
}
impl<T> Return<T> {
pub fn put(&mut self, result: anyhow::Result<T>) {
match result {
Err(e) => {
let s = format!("Waterbox Error: {:?}", e);
let len = std::cmp::min(s.len(), 1023);
self.error_message[0..len].copy_from_slice(&s.as_bytes()[0..len]);
self.error_message[len] = 0;
},
Ok(t) => {
self.error_message[0] = 0;
self.data = t;
}
}
}
}
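// A self-contained sketch of the `Return<T>` protocol documented above: a caller
// checks `error_message[0]` after every call. The error text and payload are
// illustrative only.
#[cfg(test)]
mod return_sketch {
    use super::Return;

    #[test]
    fn leading_nul_byte_signals_success() {
        let mut ret = Return::<u32> { error_message: [0; 1024], data: 0 };
        ret.put(Err(anyhow::anyhow!("boom")));
        // On failure, error_message holds a NUL-terminated description...
        assert_ne!(ret.error_message[0], 0);
        ret.put(Ok(42));
        // ...and on success it starts with a 0 byte and data carries the value.
        assert_eq!(ret.error_message[0], 0);
        assert_eq!(ret.data, 42);
    }
}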
/// write bytes. Return 0 on success, or < 0 on failure.
/// Must write all provided bytes in one call or fail, not permitted to write less (unlike reader).
pub type WriteCallback = extern fn(userdata: usize, data: *const u8, size: usize) -> i32;
struct CWriter {
/// will be passed to callback
pub userdata: usize,
pub callback: WriteCallback,
}
impl Write for CWriter {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let res = (self.callback)(self.userdata, buf.as_ptr(), buf.len());
if res < 0 {
Err(io::Error::new(io::ErrorKind::Other, "Callback signaled abnormal failure"))
} else {
Ok(buf.len())
}
}
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
self.write(buf)?;
Ok(())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
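// A self-contained sketch of the `WriteCallback` contract above: `userdata`
// smuggles a pointer to a `Vec<u8>` across the FFI boundary. The names here are
// illustrative; real callers live on the frontend side.
#[cfg(test)]
mod cwriter_sketch {
    use super::CWriter;
    use std::io::Write;

    extern fn collect(userdata: usize, data: *const u8, size: usize) -> i32 {
        let sink = unsafe { &mut *(userdata as *mut Vec<u8>) };
        sink.extend_from_slice(unsafe { std::slice::from_raw_parts(data, size) });
        0 // success; a negative value would signal failure
    }

    #[test]
    fn cwriter_forwards_all_bytes() {
        let mut sink = Vec::<u8>::new();
        let mut writer = CWriter {
            userdata: &mut sink as *mut Vec<u8> as usize,
            callback: collect,
        };
        writer.write_all(b"hello").unwrap();
        assert_eq!(sink, b"hello");
    }
}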
/// Read bytes into the buffer. Return number of bytes read on success, or < 0 on failure.
/// Permitted to read less than the provided buffer size, but must always read at least 1
/// byte if EOF is not reached. If EOF is reached, should return 0.
pub type ReadCallback = extern fn(userdata: usize, data: *mut u8, size: usize) -> isize;
struct CReader {
pub userdata: usize,
pub callback: ReadCallback,
}
impl Read for CReader {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let res = (self.callback)(self.userdata, buf.as_mut_ptr(), buf.len());
if res < 0 {
Err(io::Error::new(io::ErrorKind::Other, "Callback signaled abnormal failure"))
} else {
Ok(res as usize)
}
}
}
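// The matching sketch for the `ReadCallback` contract: feeds bytes from an
// in-memory slice and returns 0 once it is exhausted (EOF). Illustrative only.
#[cfg(test)]
mod creader_sketch {
    use super::CReader;

    extern fn feed(userdata: usize, data: *mut u8, size: usize) -> isize {
        let src = unsafe { &mut *(userdata as *mut &[u8]) };
        let n = size.min(src.len());
        unsafe { std::ptr::copy_nonoverlapping(src.as_ptr(), data, n) };
        *src = &src[n..];
        n as isize // 0 at EOF, per the contract above
    }

    #[test]
    fn creader_reads_to_eof() {
        let mut src: &[u8] = b"waterbox";
        let mut reader = CReader {
            userdata: &mut src as *mut &[u8] as usize,
            callback: feed,
        };
        let mut out = Vec::new();
        std::io::copy(&mut reader, &mut out).unwrap();
        assert_eq!(out, b"waterbox");
    }
}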
// #[repr(C)]
// pub struct MissingFileCallback {
// pub userdata: usize,
// pub callback: extern fn(userdata: usize, name: *const c_char) -> *mut MissingFileResult,
// }
// #[repr(C)]
// pub struct MissingFileResult {
// pub reader: CReader,
// pub writable: bool,
// }
fn arg_to_str(arg: *const c_char) -> anyhow::Result<String> {
let cs = unsafe { CStr::from_ptr(arg) };
match cs.to_str() {
Ok(s) => Ok(s.to_string()),
Err(_) => Err(anyhow!("Bad UTF-8 string")),
}
}
fn read_whole_file(reader: &mut CReader) -> anyhow::Result<Vec<u8>> {
let mut res = Vec::<u8>::new();
io::copy(reader, &mut res)?;
Ok(res)
}
/// Given a guest executable and a memory layout, create a new host environment. All data will be immediately consumed from the reader,
/// which will not be used after this call.
#[no_mangle]
pub extern fn wbx_create_host(layout: &MemoryLayoutTemplate, module_name: *const c_char, callback: ReadCallback, userdata: usize, ret: &mut Return<*mut WaterboxHost>) {
let mut reader = CReader {
userdata,
callback
};
let res = (|| {
let data = read_whole_file(&mut reader)?;
WaterboxHost::new(data, &arg_to_str(module_name)?[..], layout)
})();
ret.put(res.map(|boxed| Box::into_raw(boxed)));
}
/// Tear down a host environment. If called while the environment is active, will deactivate it first.
#[no_mangle]
pub extern fn wbx_destroy_host(obj: *mut WaterboxHost, ret: &mut Return<()>) {
let res = (|| {
unsafe {
// retake ownership of the host from the raw pointer and drop it
drop(Box::from_raw(obj));
Ok(())
}
})();
ret.put(res);
}
/// Activate a host environment. This swaps it into memory and makes it available for use.
/// Pointers to inside the environment are only valid while active. Callbacks into the environment can only be called
/// while active. Uses a mutex internally so as to not stomp over other host environments in the same 4GiB slice.
/// Ignored if host is already active.
#[no_mangle]
pub extern fn wbx_activate_host(obj: &mut WaterboxHost, ret: &mut Return<()>) {
let res = (|| {
obj.activate();
Ok(())
})();
ret.put(res);
}
/// Deactivates a host environment, and releases the mutex.
/// Ignored if host is not active
#[no_mangle]
pub extern fn wbx_deactivate_host(obj: &mut WaterboxHost, ret: &mut Return<()>) {
obj.deactivate();
ret.put(Ok(()));
}
/// Returns a thunk suitable for calling an exported function from the guest executable. This pointer is only valid
/// while the host is active. A missing proc is not an error and simply returns 0. The guest function must be,
/// and the returned callback will be, sysv abi, and will only pass up to 6 int/ptr args and no other arg types.
#[no_mangle]
pub extern fn wbx_get_proc_addr(obj: &mut WaterboxHost, name: *const c_char, ret: &mut Return<usize>) {
match arg_to_str(name) {
Ok(s) => {
ret.put(obj.get_proc_addr(&s));
},
Err(e) => {
ret.put(Err(e))
}
}
}
/// Returns a thunk suitable for calling an arbitrary entry point into the guest executable. This pointer is only valid
/// while the host is active. wbx_get_proc_addr already calls this internally on pointers it returns, so this call is
/// only needed if the guest exposes callin pointers that aren't named exports (for instance, if a function returns
/// a pointer to another function).
#[no_mangle]
pub extern fn wbx_get_callin_addr(obj: &mut WaterboxHost, ptr: usize, ret: &mut Return<usize>) {
ret.put(obj.get_external_callin_ptr(ptr));
}
/// Returns the raw address of a function exported from the guest. `wbx_get_proc_addr()` is equivalent to
/// `wbx_get_callin_addr(wbx_get_proc_addr_raw())`. Most things should not use this directly, as the returned
/// pointer will not have proper stack hygiene and will crash on syscalls from the guest.
#[no_mangle]
pub extern fn wbx_get_proc_addr_raw(obj: &mut WaterboxHost, name: *const c_char, ret: &mut Return<usize>) {
match arg_to_str(name) {
Ok(s) => {
ret.put(obj.get_proc_addr_raw(&s));
},
Err(e) => {
ret.put(Err(e))
}
}
}
/// Returns a function pointer suitable for passing to the guest to allow it to call back while active.
/// Slot number is an integer that is used to keep pointers consistent across runs: If the host is loaded
/// at a different address, and some external function `foo` moves from run to run, things will still work out
/// in the guest because `foo` was bound to the same slot and a particular slot gives a consistent pointer.
/// The returned thunk will be, and the callback must be, sysv abi and will only pass up to 6 int/ptr args and no other arg types.
#[no_mangle]
pub extern fn wbx_get_callback_addr(obj: &mut WaterboxHost, callback: ExternalCallback, slot: usize, ret: &mut Return<usize>) {
ret.put(obj.get_external_callback_ptr(callback, slot));
}
/// Calls the seal operation, which is a one time action that prepares the host to save states.
#[no_mangle]
pub extern fn wbx_seal(obj: &mut WaterboxHost, ret: &mut Return<()>) {
ret.put(obj.seal());
}
/// Mounts a file in the environment. All data will be immediately consumed from the reader, which will not be used after this call.
/// To prevent nondeterminism, adding and removing files is very limited with respect to savestates. If a file is writable, it must never exist
/// when save_state is called, and can only be used for transient operations. If a file is readable, it can appear in savestates,
/// but it must exist in every savestate and the exact sequence of add_file calls must be consistent from savestate to savestate.
#[no_mangle]
pub extern fn wbx_mount_file(obj: &mut WaterboxHost, name: *const c_char, callback: ReadCallback, userdata: usize, writable: bool, ret: &mut Return<()>) {
let mut reader = CReader {
userdata,
callback
};
let res: anyhow::Result<()> = (|| {
obj.mount_file(arg_to_str(name)?, read_whole_file(&mut reader)?, writable)?;
Ok(())
})();
ret.put(res);
}
/// Remove a file previously added. Writer is optional; if provided, the contents of the file at time of removal will be dumped to it.
/// It is an error to remove a file which is currently open in the guest.
#[no_mangle]
pub extern fn wbx_unmount_file(obj: &mut WaterboxHost, name: *const c_char, callback_opt: Option<WriteCallback>, userdata: usize, ret: &mut Return<()>) {
let res: anyhow::Result<()> = (|| {
let data = obj.unmount_file(&arg_to_str(name)?)?;
if let Some(callback) = callback_opt {
let mut writer = CWriter {
userdata,
callback
};
io::copy(&mut &data[..], &mut writer)?;
}
Ok(())
})();
ret.put(res);
}
/// Set (or clear, with None) a callback to be called whenever the guest tries to load a nonexistent file.
/// The callback will be provided with the name of the requested load, and can either return null to signal the waterbox
/// to return ENOENT to the guest, or a struct to immediately load that file. You may not call any wbx methods
/// in the callback. If the MissingFileResult is provided, it will be consumed immediately and will have the same effect
/// as wbx_mount_file(). You may free resources associated with the MissingFileResult whenever control next returns to your code.
// #[no_mangle]
// pub extern fn wbx_set_missing_file_callback(obj: &mut WaterboxHost, mfc_o: Option<&MissingFileCallback>) {
// match mfc_o {
// None => obj.set_missing_file_callback(None),
// Some(mfc) => {
// let userdata = mfc.userdata;
// let callback = mfc.callback;
// obj.set_missing_file_callback(Some(Box::new(move |name| {
// let namestr = CString::new(name).unwrap();
// let mfr = callback(userdata, namestr.as_ptr() as *const c_char);
// if mfr == 0 as *mut MissingFileResult {
// return None
// }
// unsafe {
// let data = read_whole_file(&mut (*mfr).reader);
// match data {
// Ok(d) => Some(fs::MissingFileResult {
// data: d,
// writable: (*mfr).writable
// }),
// Err(_) => None,
// }
// }
// })));
// }
// }
// }
/// Save state. Must not be called before seal. Must not be called with any writable files mounted.
/// Must always be called with the same sequence and contents of readonly files.
#[no_mangle]
pub extern fn wbx_save_state(obj: &mut WaterboxHost, callback: WriteCallback, userdata: usize, ret: &mut Return<()>) {
let mut writer = CWriter {
userdata,
callback
};
let res: anyhow::Result<()> = (|| {
obj.save_state(&mut writer)?;
Ok(())
})();
ret.put(res);
}
/// Load state. Must not be called before seal. Must not be called with any writable files mounted.
/// Must always be called with the same sequence and contents of readonly files that were in the save state.
/// Must be called with the same wbx executable and memory layout as in the savestate.
/// Errors generally poison the environment; sorry!
#[no_mangle]
pub extern fn wbx_load_state(obj: &mut WaterboxHost, callback: ReadCallback, userdata: usize, ret: &mut Return<()>) {
let mut reader = CReader {
userdata,
callback
};
ret.put(obj.load_state(&mut reader));
}
/// Control whether the host automatically evicts blocks from memory when they are not active. For the best performance,
/// this should be set to false. Set to true to help catch dangling pointer issues. Will be ignored (and forced to true)
/// if waterboxhost was built in debug mode. This is a single global setting.
#[no_mangle]
pub extern fn wbx_set_always_evict_blocks(_val: bool) {
#[cfg(not(debug_assertions))]
{
unsafe { ALWAYS_EVICT_BLOCKS = _val; }
}
}
/// Retrieve the number of pages of guest memory that this host is tracking
#[no_mangle]
pub extern fn wbx_get_page_len(obj: &mut WaterboxHost, ret: &mut Return<usize>) {
ret.put(Ok(obj.page_len()))
}
/// Retrieve basic information for a tracked guest page. Index should be in 0..wbx_get_page_len().
/// 1 - readable, implies allocated
/// 2 - writable
/// 4 - executable
/// 0x10 - stack
/// 0x20 - allocated but not readable (guest-generated "guard")
/// 0x40 - invisible
/// 0x80 - dirty
#[no_mangle]
pub extern fn wbx_get_page_data(obj: &mut WaterboxHost, index: usize, ret: &mut Return<u8>) {
if index >= obj.page_len() {
ret.put(Err(anyhow!("Index out of range")))
} else {
ret.put(Ok(obj.page_info(index)))
}
}
| 37.265789 | 168 | 0.691053 |
38f1f12c10c3f617b5ad5f6304373b8a18643d35 | 11,344 | #[doc = "Register `MATRIX_MRCR` reader"]
pub struct R(crate::R<MATRIX_MRCR_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<MATRIX_MRCR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<MATRIX_MRCR_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<MATRIX_MRCR_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `MATRIX_MRCR` writer"]
pub struct W(crate::W<MATRIX_MRCR_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<MATRIX_MRCR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<MATRIX_MRCR_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<MATRIX_MRCR_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `RCB0` reader - Remap Command Bit for Master 0"]
pub struct RCB0_R(crate::FieldReader<bool, bool>);
impl RCB0_R {
pub(crate) fn new(bits: bool) -> Self {
RCB0_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for RCB0_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RCB0` writer - Remap Command Bit for Master 0"]
pub struct RCB0_W<'a> {
w: &'a mut W,
}
impl<'a> RCB0_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01);
self.w
}
}
#[doc = "Field `RCB1` reader - Remap Command Bit for Master 1"]
pub struct RCB1_R(crate::FieldReader<bool, bool>);
impl RCB1_R {
pub(crate) fn new(bits: bool) -> Self {
RCB1_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for RCB1_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RCB1` writer - Remap Command Bit for Master 1"]
pub struct RCB1_W<'a> {
w: &'a mut W,
}
impl<'a> RCB1_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1);
self.w
}
}
#[doc = "Field `RCB2` reader - Remap Command Bit for Master 2"]
pub struct RCB2_R(crate::FieldReader<bool, bool>);
impl RCB2_R {
pub(crate) fn new(bits: bool) -> Self {
RCB2_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for RCB2_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RCB2` writer - Remap Command Bit for Master 2"]
pub struct RCB2_W<'a> {
w: &'a mut W,
}
impl<'a> RCB2_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2);
self.w
}
}
#[doc = "Field `RCB3` reader - Remap Command Bit for Master 3"]
pub struct RCB3_R(crate::FieldReader<bool, bool>);
impl RCB3_R {
pub(crate) fn new(bits: bool) -> Self {
RCB3_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for RCB3_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RCB3` writer - Remap Command Bit for Master 3"]
pub struct RCB3_W<'a> {
w: &'a mut W,
}
impl<'a> RCB3_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u32 & 0x01) << 3);
self.w
}
}
#[doc = "Field `RCB4` reader - Remap Command Bit for Master 4"]
pub struct RCB4_R(crate::FieldReader<bool, bool>);
impl RCB4_R {
pub(crate) fn new(bits: bool) -> Self {
RCB4_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for RCB4_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RCB4` writer - Remap Command Bit for Master 4"]
pub struct RCB4_W<'a> {
w: &'a mut W,
}
impl<'a> RCB4_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u32 & 0x01) << 4);
self.w
}
}
#[doc = "Field `RCB5` reader - Remap Command Bit for Master 5"]
pub struct RCB5_R(crate::FieldReader<bool, bool>);
impl RCB5_R {
pub(crate) fn new(bits: bool) -> Self {
RCB5_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for RCB5_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RCB5` writer - Remap Command Bit for Master 5"]
pub struct RCB5_W<'a> {
w: &'a mut W,
}
impl<'a> RCB5_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 5)) | ((value as u32 & 0x01) << 5);
self.w
}
}
#[doc = "Field `RCB6` reader - Remap Command Bit for Master 6"]
pub struct RCB6_R(crate::FieldReader<bool, bool>);
impl RCB6_R {
pub(crate) fn new(bits: bool) -> Self {
RCB6_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for RCB6_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RCB6` writer - Remap Command Bit for Master 6"]
pub struct RCB6_W<'a> {
w: &'a mut W,
}
impl<'a> RCB6_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 6)) | ((value as u32 & 0x01) << 6);
self.w
}
}
impl R {
#[doc = "Bit 0 - Remap Command Bit for Master 0"]
#[inline(always)]
pub fn rcb0(&self) -> RCB0_R {
RCB0_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 1 - Remap Command Bit for Master 1"]
#[inline(always)]
pub fn rcb1(&self) -> RCB1_R {
RCB1_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 2 - Remap Command Bit for Master 2"]
#[inline(always)]
pub fn rcb2(&self) -> RCB2_R {
RCB2_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 3 - Remap Command Bit for Master 3"]
#[inline(always)]
pub fn rcb3(&self) -> RCB3_R {
RCB3_R::new(((self.bits >> 3) & 0x01) != 0)
}
#[doc = "Bit 4 - Remap Command Bit for Master 4"]
#[inline(always)]
pub fn rcb4(&self) -> RCB4_R {
RCB4_R::new(((self.bits >> 4) & 0x01) != 0)
}
#[doc = "Bit 5 - Remap Command Bit for Master 5"]
#[inline(always)]
pub fn rcb5(&self) -> RCB5_R {
RCB5_R::new(((self.bits >> 5) & 0x01) != 0)
}
#[doc = "Bit 6 - Remap Command Bit for Master 6"]
#[inline(always)]
pub fn rcb6(&self) -> RCB6_R {
RCB6_R::new(((self.bits >> 6) & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - Remap Command Bit for Master 0"]
#[inline(always)]
pub fn rcb0(&mut self) -> RCB0_W {
RCB0_W { w: self }
}
#[doc = "Bit 1 - Remap Command Bit for Master 1"]
#[inline(always)]
pub fn rcb1(&mut self) -> RCB1_W {
RCB1_W { w: self }
}
#[doc = "Bit 2 - Remap Command Bit for Master 2"]
#[inline(always)]
pub fn rcb2(&mut self) -> RCB2_W {
RCB2_W { w: self }
}
#[doc = "Bit 3 - Remap Command Bit for Master 3"]
#[inline(always)]
pub fn rcb3(&mut self) -> RCB3_W {
RCB3_W { w: self }
}
#[doc = "Bit 4 - Remap Command Bit for Master 4"]
#[inline(always)]
pub fn rcb4(&mut self) -> RCB4_W {
RCB4_W { w: self }
}
#[doc = "Bit 5 - Remap Command Bit for Master 5"]
#[inline(always)]
pub fn rcb5(&mut self) -> RCB5_W {
RCB5_W { w: self }
}
#[doc = "Bit 6 - Remap Command Bit for Master 6"]
#[inline(always)]
pub fn rcb6(&mut self) -> RCB6_W {
RCB6_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Master Remap Control Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [matrix_mrcr](index.html) module"]
pub struct MATRIX_MRCR_SPEC;
impl crate::RegisterSpec for MATRIX_MRCR_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [matrix_mrcr::R](R) reader structure"]
impl crate::Readable for MATRIX_MRCR_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [matrix_mrcr::W](W) writer structure"]
impl crate::Writable for MATRIX_MRCR_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets MATRIX_MRCR to value 0"]
impl crate::Resettable for MATRIX_MRCR_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
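// A hedged usage sketch for the register API above, assuming the containing PAC
// crate exposes this peripheral as `MATRIX` (the handle name is hypothetical):
//
//     let matrix = unsafe { &*pac::MATRIX::ptr() };
//     // Read-modify-write: issue remap commands for masters 0 and 2 only.
//     matrix.matrix_mrcr.modify(|_, w| w.rcb0().set_bit().rcb2().set_bit());
//
// The test below is a self-contained illustration of the bit math that the
// `bit()` writers above perform (bit 2 set, all other bits preserved).
#[cfg(test)]
mod mrcr_sketch {
    #[test]
    fn set_bit_preserves_other_bits() {
        let bits: u32 = 0b0100_0001; // pretend current register value
        // equivalent of `w.rcb2().set_bit()`:
        let bits = (bits & !(0x01 << 2)) | ((true as u32 & 0x01) << 2);
        assert_eq!(bits, 0b0100_0101);
    }
}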
| 29.161954 | 421 | 0.562764 |
efd7f362b152e7cc371656fd8baa49451b4fb5c4 | 1,434 | use reqwest::Method as HttpMethod;
use crate::client::Client;
use crate::client::Response;
use crate::client::Result;
pub use self::models::*;
mod models;
/// This structure models all the actions related to the `/v2/measurements` namespace.
/// You shouldn't use it on your own - refer to the `AirlyClient` facade instead.
pub struct MeasurementsClient {
client: Client,
}
impl MeasurementsClient {
pub fn new(key: String) -> Self {
Self {
client: Client::new(key),
}
}
/// Returns measurements for installation with specified id.
/// <https://developer.airly.eu/docs#endpoints.measurements.installation>
///
/// # Example
///
/// ```rust
/// let client = AirlyClient::new("my-api-key");
/// println!("{:#?}", client.measurements().get(250));
/// ```
pub fn get(&self, installation_id: u32) -> Result<Response<Measurements>> {
self.client.perform(
HttpMethod::GET,
format!("measurements/installation?installationId={}", installation_id),
)
}
/// Returns measurements for specific point.
/// <https://developer.airly.eu/docs#endpoints.measurements.point>
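///
/// # Example
///
/// ```rust
/// // The coordinates below are illustrative (roughly Kraków's city centre).
/// let client = AirlyClient::new("my-api-key");
/// println!("{:#?}", client.measurements().for_point(50.06, 19.94));
/// ```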
pub fn for_point(&self, lat: f32, lng: f32) -> Result<Response<Measurements>> {
self.client.perform(
HttpMethod::GET,
format!("measurements/point?lat={}&lng={}", lat, lng),
)
}
}
| 29.265306 | 86 | 0.620642 |
22d850dc118f8826d6749da976d4f68598326bd4 | 89,794 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Contains `ArrayData`, a generic representation of Arrow array data which encapsulates
//! common attributes and operations for Arrow arrays.
use crate::datatypes::{DataType, IntervalUnit, UnionMode};
use crate::error::{ArrowError, Result};
use crate::{bitmap::Bitmap, datatypes::ArrowNativeType};
use crate::{
buffer::{Buffer, MutableBuffer},
util::bit_util,
};
use half::f16;
use std::convert::TryInto;
use std::mem;
use std::ops::Range;
use std::sync::Arc;
use super::equal::equal;
#[inline]
pub(crate) fn count_nulls(
null_bit_buffer: Option<&Buffer>,
offset: usize,
len: usize,
) -> usize {
if let Some(buf) = null_bit_buffer {
len.checked_sub(buf.count_set_bits_offset(offset, len))
.unwrap()
} else {
0
}
}
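// A short sketch of `count_nulls` semantics: nulls are the *unset* bits within
// the window `[offset, offset + len)`. The bit pattern is illustrative only.
#[cfg(test)]
mod count_nulls_sketch {
    use super::*;

    #[test]
    fn counts_unset_bits_in_window() {
        // bits 0..=3 set (valid), bits 4..=7 clear (null)
        let validity = Buffer::from(&[0b0000_1111u8][..]);
        assert_eq!(count_nulls(Some(&validity), 0, 8), 4);
        assert_eq!(count_nulls(Some(&validity), 2, 4), 2); // window 2..6 has two nulls
        assert_eq!(count_nulls(None, 0, 8), 0); // no bitmap means no nulls
    }
}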
/// Creates 2 [`MutableBuffer`]s with a given `capacity` (in slots).
#[inline]
pub(crate) fn new_buffers(data_type: &DataType, capacity: usize) -> [MutableBuffer; 2] {
let empty_buffer = MutableBuffer::new(0);
match data_type {
DataType::Null => [empty_buffer, MutableBuffer::new(0)],
DataType::Boolean => {
let bytes = bit_util::ceil(capacity, 8);
let buffer = MutableBuffer::new(bytes);
[buffer, empty_buffer]
}
DataType::UInt8 => [
MutableBuffer::new(capacity * mem::size_of::<u8>()),
empty_buffer,
],
DataType::UInt16 => [
MutableBuffer::new(capacity * mem::size_of::<u16>()),
empty_buffer,
],
DataType::UInt32 => [
MutableBuffer::new(capacity * mem::size_of::<u32>()),
empty_buffer,
],
DataType::UInt64 => [
MutableBuffer::new(capacity * mem::size_of::<u64>()),
empty_buffer,
],
DataType::Int8 => [
MutableBuffer::new(capacity * mem::size_of::<i8>()),
empty_buffer,
],
DataType::Int16 => [
MutableBuffer::new(capacity * mem::size_of::<i16>()),
empty_buffer,
],
DataType::Int32 => [
MutableBuffer::new(capacity * mem::size_of::<i32>()),
empty_buffer,
],
DataType::Int64 => [
MutableBuffer::new(capacity * mem::size_of::<i64>()),
empty_buffer,
],
DataType::Float16 => [
MutableBuffer::new(capacity * mem::size_of::<f16>()),
empty_buffer,
],
DataType::Float32 => [
MutableBuffer::new(capacity * mem::size_of::<f32>()),
empty_buffer,
],
DataType::Float64 => [
MutableBuffer::new(capacity * mem::size_of::<f64>()),
empty_buffer,
],
DataType::Date32 | DataType::Time32(_) => [
MutableBuffer::new(capacity * mem::size_of::<i32>()),
empty_buffer,
],
DataType::Date64
| DataType::Time64(_)
| DataType::Duration(_)
| DataType::Timestamp(_, _) => [
MutableBuffer::new(capacity * mem::size_of::<i64>()),
empty_buffer,
],
DataType::Interval(IntervalUnit::YearMonth) => [
MutableBuffer::new(capacity * mem::size_of::<i32>()),
empty_buffer,
],
DataType::Interval(IntervalUnit::DayTime) => [
MutableBuffer::new(capacity * mem::size_of::<i64>()),
empty_buffer,
],
DataType::Interval(IntervalUnit::MonthDayNano) => [
MutableBuffer::new(capacity * mem::size_of::<i128>()),
empty_buffer,
],
DataType::Utf8 | DataType::Binary => {
let mut buffer = MutableBuffer::new((1 + capacity) * mem::size_of::<i32>());
// safety: `unsafe` code assumes that this buffer is initialized with one element
buffer.push(0i32);
[buffer, MutableBuffer::new(capacity * mem::size_of::<u8>())]
}
DataType::LargeUtf8 | DataType::LargeBinary => {
let mut buffer = MutableBuffer::new((1 + capacity) * mem::size_of::<i64>());
// safety: `unsafe` code assumes that this buffer is initialized with one element
buffer.push(0i64);
[buffer, MutableBuffer::new(capacity * mem::size_of::<u8>())]
}
DataType::List(_) | DataType::Map(_, _) => {
// offset buffer always starts with a zero
let mut buffer = MutableBuffer::new((1 + capacity) * mem::size_of::<i32>());
buffer.push(0i32);
[buffer, empty_buffer]
}
DataType::LargeList(_) => {
// offset buffer always starts with a zero
let mut buffer = MutableBuffer::new((1 + capacity) * mem::size_of::<i64>());
buffer.push(0i64);
[buffer, empty_buffer]
}
DataType::FixedSizeBinary(size) => {
[MutableBuffer::new(capacity * *size as usize), empty_buffer]
}
DataType::Dictionary(child_data_type, _) => match child_data_type.as_ref() {
DataType::UInt8 => [
MutableBuffer::new(capacity * mem::size_of::<u8>()),
empty_buffer,
],
DataType::UInt16 => [
MutableBuffer::new(capacity * mem::size_of::<u16>()),
empty_buffer,
],
DataType::UInt32 => [
MutableBuffer::new(capacity * mem::size_of::<u32>()),
empty_buffer,
],
DataType::UInt64 => [
MutableBuffer::new(capacity * mem::size_of::<u64>()),
empty_buffer,
],
DataType::Int8 => [
MutableBuffer::new(capacity * mem::size_of::<i8>()),
empty_buffer,
],
DataType::Int16 => [
MutableBuffer::new(capacity * mem::size_of::<i16>()),
empty_buffer,
],
DataType::Int32 => [
MutableBuffer::new(capacity * mem::size_of::<i32>()),
empty_buffer,
],
DataType::Int64 => [
MutableBuffer::new(capacity * mem::size_of::<i64>()),
empty_buffer,
],
_ => unreachable!(),
},
DataType::FixedSizeList(_, _) | DataType::Struct(_) => {
[empty_buffer, MutableBuffer::new(0)]
}
DataType::Decimal(_, _) => [
MutableBuffer::new(capacity * mem::size_of::<u8>()),
empty_buffer,
],
DataType::Union(_, mode) => {
let type_ids = MutableBuffer::new(capacity * mem::size_of::<i8>());
match mode {
UnionMode::Sparse => [type_ids, empty_buffer],
UnionMode::Dense => {
let offsets = MutableBuffer::new(capacity * mem::size_of::<i32>());
[type_ids, offsets]
}
}
}
}
}
/// Maps 2 [`MutableBuffer`]s into a vector of [Buffer]s whose size depends on `data_type`.
#[inline]
pub(crate) fn into_buffers(
data_type: &DataType,
buffer1: MutableBuffer,
buffer2: MutableBuffer,
) -> Vec<Buffer> {
match data_type {
DataType::Null | DataType::Struct(_) => vec![],
DataType::Utf8
| DataType::Binary
| DataType::LargeUtf8
| DataType::LargeBinary
| DataType::Union(_, _) => vec![buffer1.into(), buffer2.into()],
_ => vec![buffer1.into()],
}
}
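// A sketch of how the two helpers above cooperate for a variable-width type:
// `new_buffers` returns (offsets, values) with the leading `0` offset already
// pushed, and `into_buffers` keeps both buffers for `Utf8`. The capacity is
// illustrative only.
#[cfg(test)]
mod buffers_sketch {
    use super::*;

    #[test]
    fn utf8_gets_offset_and_value_buffers() {
        let [offsets, values] = new_buffers(&DataType::Utf8, 10);
        assert_eq!(offsets.len(), std::mem::size_of::<i32>()); // the pre-pushed 0i32
        assert_eq!(values.len(), 0);
        let buffers = into_buffers(&DataType::Utf8, offsets, values);
        assert_eq!(buffers.len(), 2);
    }
}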
/// A generic representation of Arrow array data which encapsulates common attributes and
/// operations for Arrow arrays. Specific operations for different array types (e.g.,
/// primitive, list, struct) are implemented in `Array`.
#[derive(Debug, Clone)]
pub struct ArrayData {
/// The data type for this array data
data_type: DataType,
/// The number of elements in this array data
len: usize,
/// The number of null elements in this array data
null_count: usize,
/// The offset into this array data, in number of items
offset: usize,
/// The buffers for this array data. Note that depending on the array types, this
/// could hold different kinds of buffers (e.g., value buffer, value offset buffer)
/// at different positions.
buffers: Vec<Buffer>,
/// The child(ren) of this array. Only non-empty for nested types, currently
/// `ListArray` and `StructArray`.
child_data: Vec<ArrayData>,
/// The null bitmap. A `None` value for this indicates all values are non-null in
/// this array.
null_bitmap: Option<Bitmap>,
}
pub type ArrayDataRef = Arc<ArrayData>;
impl ArrayData {
/// Create a new ArrayData instance.
///
/// If `null_count` is not specified, the number of nulls in
/// null_bit_buffer is calculated
///
/// # Safety
///
/// The input values *must* form a valid Arrow array for
/// `data_type`, or undefined behavior can result.
///
/// Note: This is a low level API and most users of the arrow
/// crate should create arrays using the methods in the `array`
/// module.
pub unsafe fn new_unchecked(
data_type: DataType,
len: usize,
null_count: Option<usize>,
null_bit_buffer: Option<Buffer>,
offset: usize,
buffers: Vec<Buffer>,
child_data: Vec<ArrayData>,
) -> Self {
let null_count = match null_count {
None => count_nulls(null_bit_buffer.as_ref(), offset, len),
Some(null_count) => null_count,
};
let null_bitmap = null_bit_buffer.map(Bitmap::from);
Self {
data_type,
len,
null_count,
offset,
buffers,
child_data,
null_bitmap,
}
}
/// Create a new ArrayData, validating that the provided buffers
/// form a valid Arrow array of the specified data type.
///
/// If `null_count` is not specified, the number of nulls in
/// null_bit_buffer is calculated
///
/// Note: This is a low level API and most users of the arrow
/// crate should create arrays using the methods in the `array`
/// module.
pub fn try_new(
data_type: DataType,
len: usize,
null_count: Option<usize>,
null_bit_buffer: Option<Buffer>,
offset: usize,
buffers: Vec<Buffer>,
child_data: Vec<ArrayData>,
) -> Result<Self> {
// Safety justification: `validate_full` is called below
let new_self = unsafe {
Self::new_unchecked(
data_type,
len,
null_count,
null_bit_buffer,
offset,
buffers,
child_data,
)
};
// As the data is not trusted, do a full validation of its contents
new_self.validate_full()?;
Ok(new_self)
}
/// Returns a builder to construct a `ArrayData` instance.
#[inline]
pub const fn builder(data_type: DataType) -> ArrayDataBuilder {
ArrayDataBuilder::new(data_type)
}
/// Returns a reference to the data type of this array data
#[inline]
pub const fn data_type(&self) -> &DataType {
&self.data_type
}
/// Updates the [DataType] of this ArrayData.
///
/// Panics if the new DataType is not compatible with the
/// existing type.
///
/// Note: currently only changing a [DataType::Decimal]'s precision
/// and scale is supported
#[inline]
pub(crate) fn with_data_type(mut self, new_data_type: DataType) -> Self {
assert!(
matches!(self.data_type, DataType::Decimal(_, _)),
"only DecimalType is supported for existing type"
);
assert!(
matches!(new_data_type, DataType::Decimal(_, _)),
"only DecimalType is supported for new datatype"
);
self.data_type = new_data_type;
self
}
/// Returns a slice of buffers for this array data
pub fn buffers(&self) -> &[Buffer] {
&self.buffers[..]
}
/// Returns a slice of children data arrays
pub fn child_data(&self) -> &[ArrayData] {
&self.child_data[..]
}
/// Returns whether the element at index `i` is null
pub fn is_null(&self, i: usize) -> bool {
if let Some(ref b) = self.null_bitmap {
return !b.is_set(self.offset + i);
}
false
}
/// Returns a reference to the null bitmap of this array data
#[inline]
pub const fn null_bitmap(&self) -> &Option<Bitmap> {
&self.null_bitmap
}
/// Returns a reference to the null buffer of this array data.
pub fn null_buffer(&self) -> Option<&Buffer> {
self.null_bitmap().as_ref().map(|b| b.buffer_ref())
}
/// Returns whether the element at index `i` is not null
pub fn is_valid(&self, i: usize) -> bool {
if let Some(ref b) = self.null_bitmap {
return b.is_set(self.offset + i);
}
true
}
/// Returns the length (i.e., number of elements) of this array
#[inline]
pub const fn len(&self) -> usize {
self.len
}
/// Returns whether array data is empty
#[inline]
pub const fn is_empty(&self) -> bool {
self.len == 0
}
/// Returns the offset of this array
#[inline]
pub const fn offset(&self) -> usize {
self.offset
}
/// Returns the total number of nulls in this array
#[inline]
pub const fn null_count(&self) -> usize {
self.null_count
}
/// Returns the total number of bytes of memory occupied by the buffers owned by this [ArrayData].
pub fn get_buffer_memory_size(&self) -> usize {
let mut size = 0;
for buffer in &self.buffers {
size += buffer.capacity();
}
if let Some(bitmap) = &self.null_bitmap {
size += bitmap.get_buffer_memory_size()
}
for child in &self.child_data {
size += child.get_buffer_memory_size();
}
size
}
/// Returns the total number of bytes of memory occupied physically by this [ArrayData].
pub fn get_array_memory_size(&self) -> usize {
let mut size = mem::size_of_val(self);
// Calculate rest of the fields top down which contain actual data
for buffer in &self.buffers {
size += mem::size_of::<Buffer>();
size += buffer.capacity();
}
if let Some(bitmap) = &self.null_bitmap {
// this includes the size of the bitmap struct itself, since it is stored directly in
// this struct we already counted those bytes in the size_of_val(self) above
size += bitmap.get_array_memory_size();
size -= mem::size_of::<Bitmap>();
}
for child in &self.child_data {
size += child.get_array_memory_size();
}
size
}
/// Creates a zero-copy slice of itself. This creates a new [ArrayData]
/// with a different offset, len and a shifted null bitmap.
///
/// # Panics
///
/// Panics if `offset + length > self.len()`.
pub fn slice(&self, offset: usize, length: usize) -> ArrayData {
assert!((offset + length) <= self.len());
if let DataType::Struct(_) = self.data_type() {
// Slice into children
let new_offset = self.offset + offset;
let new_data = ArrayData {
data_type: self.data_type().clone(),
len: length,
null_count: count_nulls(self.null_buffer(), new_offset, length),
offset: new_offset,
buffers: self.buffers.clone(),
// Slice child data, to propagate offsets down to them
child_data: self
.child_data()
.iter()
.map(|data| data.slice(offset, length))
.collect(),
null_bitmap: self.null_bitmap().clone(),
};
new_data
} else {
let mut new_data = self.clone();
new_data.len = length;
new_data.offset = offset + self.offset;
new_data.null_count =
count_nulls(new_data.null_buffer(), new_data.offset, new_data.len);
new_data
}
}
/// Returns the `buffer` as a slice of type `T` starting at self.offset
/// # Panics
/// This function panics if:
/// * the buffer is not byte-aligned with type T, or
/// * the datatype is `Boolean` (it corresponds to a bit-packed buffer where the offset is not applicable)
#[inline]
pub(crate) fn buffer<T: ArrowNativeType>(&self, buffer: usize) -> &[T] {
let values = unsafe { self.buffers[buffer].as_slice().align_to::<T>() };
if !values.0.is_empty() || !values.2.is_empty() {
panic!("The buffer is not byte-aligned with its interpretation")
};
assert_ne!(self.data_type, DataType::Boolean);
&values.1[self.offset..]
}
/// Returns a new empty [ArrayData] valid for `data_type`.
pub fn new_empty(data_type: &DataType) -> Self {
let buffers = new_buffers(data_type, 0);
let [buffer1, buffer2] = buffers;
let buffers = into_buffers(data_type, buffer1, buffer2);
let child_data = match data_type {
DataType::Null
| DataType::Boolean
| DataType::UInt8
| DataType::UInt16
| DataType::UInt32
| DataType::UInt64
| DataType::Int8
| DataType::Int16
| DataType::Int32
| DataType::Int64
| DataType::Float16
| DataType::Float32
| DataType::Float64
| DataType::Date32
| DataType::Date64
| DataType::Time32(_)
| DataType::Time64(_)
| DataType::Duration(_)
| DataType::Timestamp(_, _)
| DataType::Utf8
| DataType::Binary
| DataType::LargeUtf8
| DataType::LargeBinary
| DataType::Interval(_)
| DataType::FixedSizeBinary(_)
| DataType::Decimal(_, _) => vec![],
DataType::List(field) => {
vec![Self::new_empty(field.data_type())]
}
DataType::FixedSizeList(field, _) => {
vec![Self::new_empty(field.data_type())]
}
DataType::LargeList(field) => {
vec![Self::new_empty(field.data_type())]
}
DataType::Struct(fields) => fields
.iter()
.map(|field| Self::new_empty(field.data_type()))
.collect(),
DataType::Map(field, _) => {
vec![Self::new_empty(field.data_type())]
}
DataType::Union(fields, _) => fields
.iter()
.map(|field| Self::new_empty(field.data_type()))
.collect(),
DataType::Dictionary(_, data_type) => {
vec![Self::new_empty(data_type)]
}
};
// Data was constructed correctly above
unsafe {
Self::new_unchecked(
data_type.clone(),
0,
Some(0),
None,
0,
buffers,
child_data,
)
}
}
/// "cheap" validation of an `ArrayData`. Ensures buffers are
/// sufficiently sized to store `len` + `offset` total elements of
/// `data_type` and performs other inexpensive consistency checks.
///
/// This check is "cheap" in the sense that it does not validate the
/// contents of the buffers (e.g. that all offsets for UTF8 arrays
/// are within the bounds of the values buffer).
///
/// See [ArrayData::validate_full] to fully validate the offset contents
/// and the validity of utf8 data
pub fn validate(&self) -> Result<()> {
// Need at least this much space in each buffer
let len_plus_offset = self.len + self.offset;
// Check that the data layout conforms to the spec
let layout = layout(&self.data_type);
if self.buffers.len() != layout.buffers.len() {
return Err(ArrowError::InvalidArgumentError(format!(
"Expected {} buffers in array of type {:?}, got {}",
layout.buffers.len(),
self.data_type,
self.buffers.len(),
)));
}
for (i, (buffer, spec)) in
self.buffers.iter().zip(layout.buffers.iter()).enumerate()
{
match spec {
BufferSpec::FixedWidth { byte_width } => {
let min_buffer_size = len_plus_offset
.checked_mul(*byte_width)
.expect("integer overflow computing min buffer size");
if buffer.len() < min_buffer_size {
return Err(ArrowError::InvalidArgumentError(format!(
"Need at least {} bytes in buffers[{}] in array of type {:?}, but got {}",
min_buffer_size, i, self.data_type, buffer.len()
)));
}
}
BufferSpec::VariableWidth => {
// not cheap to validate (need to look at the
// data). Partially checked in validate_offsets
// called below. Can check with `validate_full`
}
BufferSpec::BitMap => {
let min_buffer_size = bit_util::ceil(len_plus_offset, 8);
if buffer.len() < min_buffer_size {
return Err(ArrowError::InvalidArgumentError(format!(
"Need at least {} bytes for bitmap in buffers[{}] in array of type {:?}, but got {}",
min_buffer_size, i, self.data_type, buffer.len()
)));
}
}
BufferSpec::AlwaysNull => {
// Nothing to validate
}
}
}
if self.null_count > self.len {
return Err(ArrowError::InvalidArgumentError(format!(
"null_count {} for an array exceeds length of {} elements",
self.null_count, self.len
)));
}
// check null bit buffer size
if let Some(null_bit_map) = self.null_bitmap.as_ref() {
let null_bit_buffer = null_bit_map.buffer_ref();
let needed_len = bit_util::ceil(len_plus_offset, 8);
if null_bit_buffer.len() < needed_len {
return Err(ArrowError::InvalidArgumentError(format!(
"null_bit_buffer size too small. got {} needed {}",
null_bit_buffer.len(),
needed_len
)));
}
} else if self.null_count > 0 {
return Err(ArrowError::InvalidArgumentError(format!(
"Array of type {} has {} nulls but no null bitmap",
self.data_type, self.null_count
)));
}
self.validate_child_data()?;
// Additional Type specific checks
match &self.data_type {
DataType::Utf8 | DataType::Binary => {
self.validate_offsets::<i32>(&self.buffers[0], self.buffers[1].len())?;
}
DataType::LargeUtf8 | DataType::LargeBinary => {
self.validate_offsets::<i64>(&self.buffers[0], self.buffers[1].len())?;
}
DataType::Dictionary(key_type, _value_type) => {
// At the moment, constructing a DictionaryArray will also check this
if !DataType::is_dictionary_key_type(key_type) {
return Err(ArrowError::InvalidArgumentError(format!(
"Dictionary values must be integer, but was {}",
key_type
)));
}
}
_ => {}
};
Ok(())
}
/// Returns a reference to the data in `buffer` as a typed slice
/// (typically `&[i32]` or `&[i64]`) after validating. The
/// returned slice is guaranteed to have at least `self.len + 1`
/// entries
fn typed_offsets<'a, T: ArrowNativeType + num::Num + std::fmt::Display>(
&'a self,
buffer: &'a Buffer,
) -> Result<&'a [T]> {
// Validate that there are the correct number of offsets for this array's length
let required_offsets = self.len + self.offset + 1;
// An empty list-like array can have 0 offsets
if buffer.is_empty() {
return Ok(&[]);
}
if (buffer.len() / std::mem::size_of::<T>()) < required_offsets {
return Err(ArrowError::InvalidArgumentError(format!(
"Offsets buffer size (bytes): {} isn't large enough for {}. Length {} needs {}",
buffer.len(), self.data_type, self.len, required_offsets
)));
}
// Justification: buffer size was validated above
Ok(unsafe {
&(buffer.typed_data::<T>()[self.offset..self.offset + self.len + 1])
})
}
/// Does a cheap sanity check that the `self.len` values in `buffer` are valid
/// offsets (of type T) into some other buffer of `values_length` bytes long
fn validate_offsets<T: ArrowNativeType + num::Num + std::fmt::Display>(
&self,
buffer: &Buffer,
values_length: usize,
) -> Result<()> {
// The buffer size is checked inside `typed_offsets`
let offsets = self.typed_offsets::<T>(buffer)?;
if offsets.is_empty() {
return Ok(());
}
let first_offset = offsets[0].to_usize().ok_or_else(|| {
ArrowError::InvalidArgumentError(format!(
"Error converting offset[0] ({}) to usize for {}",
offsets[0], self.data_type
))
})?;
let last_offset = offsets[self.len].to_usize().ok_or_else(|| {
ArrowError::InvalidArgumentError(format!(
"Error converting offset[{}] ({}) to usize for {}",
self.len, offsets[self.len], self.data_type
))
})?;
if first_offset > values_length {
return Err(ArrowError::InvalidArgumentError(format!(
"First offset {} of {} is larger than values length {}",
first_offset, self.data_type, values_length,
)));
}
if last_offset > values_length {
return Err(ArrowError::InvalidArgumentError(format!(
"Last offset {} of {} is larger than values length {}",
last_offset, self.data_type, values_length,
)));
}
if first_offset > last_offset {
return Err(ArrowError::InvalidArgumentError(format!(
"First offset {} in {} is smaller than last offset {}",
first_offset, self.data_type, last_offset,
)));
}
Ok(())
}
/// Validates the layout of `child_data` ArrayData structures
fn validate_child_data(&self) -> Result<()> {
match &self.data_type {
DataType::List(field) | DataType::Map(field, _) => {
let values_data = self.get_single_valid_child_data(field.data_type())?;
self.validate_offsets::<i32>(&self.buffers[0], values_data.len)?;
Ok(())
}
DataType::LargeList(field) => {
let values_data = self.get_single_valid_child_data(field.data_type())?;
self.validate_offsets::<i64>(&self.buffers[0], values_data.len)?;
Ok(())
}
DataType::FixedSizeList(field, list_size) => {
let values_data = self.get_single_valid_child_data(field.data_type())?;
let list_size: usize = (*list_size).try_into().map_err(|_| {
ArrowError::InvalidArgumentError(format!(
"{} has a negative list_size {}",
self.data_type, list_size
))
})?;
let expected_values_len = self.len
.checked_mul(list_size)
.expect("integer overflow computing expected number of expected values in FixedListSize");
if values_data.len < expected_values_len {
return Err(ArrowError::InvalidArgumentError(format!(
"Values length {} is less than the length ({}) multiplied by the value size ({}) for {}",
values_data.len, self.len, list_size, self.data_type
)));
}
Ok(())
}
DataType::Struct(fields) => {
self.validate_num_child_data(fields.len())?;
for (i, field) in fields.iter().enumerate() {
let field_data = self.get_valid_child_data(i, field.data_type())?;
// Ensure child field has sufficient size
if field_data.len < self.len {
return Err(ArrowError::InvalidArgumentError(format!(
"{} child array #{} for field {} has length smaller than expected for struct array ({} < {})",
self.data_type, i, field.name(), field_data.len, self.len
)));
}
}
Ok(())
}
DataType::Union(fields, mode) => {
self.validate_num_child_data(fields.len())?;
for (i, field) in fields.iter().enumerate() {
let field_data = self.get_valid_child_data(i, field.data_type())?;
if mode == &UnionMode::Sparse
&& field_data.len < (self.len + self.offset)
{
return Err(ArrowError::InvalidArgumentError(format!(
"Sparse union child array #{} has length smaller than expected for union array ({} < {})",
i, field_data.len, self.len + self.offset
)));
}
}
Ok(())
}
DataType::Dictionary(_key_type, value_type) => {
self.get_single_valid_child_data(value_type)?;
Ok(())
}
_ => {
// other types do not have child data
if !self.child_data.is_empty() {
return Err(ArrowError::InvalidArgumentError(format!(
"Expected no child arrays for type {} but got {}",
self.data_type,
self.child_data.len()
)));
}
Ok(())
}
}
}
/// Ensures that this array data has a single child_data with the
/// expected type, and calls `validate()` on it. Returns a
/// reference to that child_data
fn get_single_valid_child_data(
&self,
expected_type: &DataType,
) -> Result<&ArrayData> {
self.validate_num_child_data(1)?;
self.get_valid_child_data(0, expected_type)
}
/// Returns `Err` if self.child_data does not have exactly `expected_len` elements
fn validate_num_child_data(&self, expected_len: usize) -> Result<()> {
if self.child_data().len() != expected_len {
Err(ArrowError::InvalidArgumentError(format!(
"Value data for {} should contain {} child data array(s), had {}",
self.data_type(),
expected_len,
self.child_data.len()
)))
} else {
Ok(())
}
}
/// Ensures that `child_data[i]` has the expected type, calls
/// `validate()` on it, and returns a reference to that child_data
fn get_valid_child_data(
&self,
i: usize,
expected_type: &DataType,
) -> Result<&ArrayData> {
let values_data = self.child_data
.get(i)
.ok_or_else(|| {
ArrowError::InvalidArgumentError(format!(
"{} did not have enough child arrays. Expected at least {} but had only {}",
self.data_type, i+1, self.child_data.len()
))
})?;
if expected_type != &values_data.data_type {
return Err(ArrowError::InvalidArgumentError(format!(
"Child type mismatch for {}. Expected {} but child data had {}",
self.data_type, expected_type, values_data.data_type
)));
}
values_data.validate()?;
Ok(values_data)
}
/// "expensive" validation that ensures:
///
/// 1. Null count is correct
/// 2. All offsets are valid
/// 3. All String data is valid UTF-8
/// 4. All dictionary offsets are valid
///
/// Does not (yet) check
/// 1. Union type_ids are valid; see [#85](https://github.com/apache/arrow-rs/issues/85)
///
/// Note: calls `validate()` internally
pub fn validate_full(&self) -> Result<()> {
// Check all buffer sizes prior to looking at them more deeply in this function
self.validate()?;
let null_bitmap_buffer = self
.null_bitmap
.as_ref()
.map(|null_bitmap| null_bitmap.buffer_ref());
let actual_null_count = count_nulls(null_bitmap_buffer, self.offset, self.len);
if actual_null_count != self.null_count {
return Err(ArrowError::InvalidArgumentError(format!(
"null_count value ({}) doesn't match actual number of nulls in array ({})",
self.null_count, actual_null_count
)));
}
match &self.data_type {
DataType::Utf8 => {
self.validate_utf8::<i32>()?;
}
DataType::LargeUtf8 => {
self.validate_utf8::<i64>()?;
}
DataType::Binary => {
self.validate_offsets_full::<i32>(self.buffers[1].len())?;
}
DataType::LargeBinary => {
self.validate_offsets_full::<i64>(self.buffers[1].len())?;
}
DataType::List(_) | DataType::Map(_, _) => {
let child = &self.child_data[0];
self.validate_offsets_full::<i32>(child.len + child.offset)?;
}
DataType::LargeList(_) => {
let child = &self.child_data[0];
self.validate_offsets_full::<i64>(child.len + child.offset)?;
}
DataType::Union(_, _) => {
// Validate Union Array as part of implementing new Union semantics
// See comments in `ArrayData::validate()`
// https://github.com/apache/arrow-rs/issues/85
//
// TODO file follow on ticket for full union validation
}
DataType::Dictionary(key_type, _value_type) => {
let dictionary_length: i64 = self.child_data[0].len.try_into().unwrap();
let max_value = dictionary_length - 1;
match key_type.as_ref() {
DataType::UInt8 => self.check_bounds::<u8>(max_value)?,
DataType::UInt16 => self.check_bounds::<u16>(max_value)?,
DataType::UInt32 => self.check_bounds::<u32>(max_value)?,
DataType::UInt64 => self.check_bounds::<u64>(max_value)?,
DataType::Int8 => self.check_bounds::<i8>(max_value)?,
DataType::Int16 => self.check_bounds::<i16>(max_value)?,
DataType::Int32 => self.check_bounds::<i32>(max_value)?,
DataType::Int64 => self.check_bounds::<i64>(max_value)?,
_ => unreachable!(),
}
}
_ => {
// No extra validation check required for other types
}
};
// validate all children recursively
self.child_data
.iter()
.enumerate()
.try_for_each(|(i, child_data)| {
child_data.validate_full().map_err(|e| {
ArrowError::InvalidArgumentError(format!(
"{} child #{} invalid: {}",
self.data_type, i, e
))
})
})?;
Ok(())
}
/// Calls the `validate(item_index, range)` function for each of
/// the ranges specified in the arrow offset buffer of type
/// `T`. Also validates that each offset is smaller than
/// `max_offset`
///
/// For example, if the offset buffer contained `[1, 2, 4]`, this
/// function would call `validate(0, 1..2)` and `validate(1, 2..4)`
fn validate_each_offset<T, V>(
&self,
offset_buffer: &Buffer,
offset_limit: usize,
validate: V,
) -> Result<()>
where
T: ArrowNativeType + std::convert::TryInto<usize> + num::Num + std::fmt::Display,
V: Fn(usize, Range<usize>) -> Result<()>,
{
// An empty binary-like array can have 0 offsets
if self.len == 0 && offset_buffer.is_empty() {
return Ok(());
}
let offsets = self.typed_offsets::<T>(offset_buffer)?;
offsets
.iter()
.zip(offsets.iter().skip(1))
.enumerate()
.map(|(i, (&start_offset, &end_offset))| {
let start_offset: usize = start_offset
.try_into()
.map_err(|_| {
ArrowError::InvalidArgumentError(format!(
"Offset invariant failure: could not convert start_offset {} to usize in slot {}",
start_offset, i))
})?;
let end_offset: usize = end_offset
.try_into()
.map_err(|_| {
ArrowError::InvalidArgumentError(format!(
"Offset invariant failure: Could not convert end_offset {} to usize in slot {}",
end_offset, i+1))
})?;
if start_offset > offset_limit {
return Err(ArrowError::InvalidArgumentError(format!(
"Offset invariant failure: offset for slot {} out of bounds: {} > {}",
i, start_offset, offset_limit))
);
}
if end_offset > offset_limit {
return Err(ArrowError::InvalidArgumentError(format!(
"Offset invariant failure: offset for slot {} out of bounds: {} > {}",
i, end_offset, offset_limit))
);
}
// check range actually is low -> high
if start_offset > end_offset {
return Err(ArrowError::InvalidArgumentError(format!(
"Offset invariant failure: non-monotonic offset at slot {}: {} > {}",
i, start_offset, end_offset))
);
}
Ok((i, start_offset..end_offset))
})
.try_for_each(|res: Result<(usize, Range<usize>)>| {
let (item_index, range) = res?;
validate(item_index, range)
})
}
/// Ensures that all strings formed by the offsets in `buffers[0]`
/// into `buffers[1]` are valid utf8 sequences
fn validate_utf8<T>(&self) -> Result<()>
where
T: ArrowNativeType + std::convert::TryInto<usize> + num::Num + std::fmt::Display,
{
let offset_buffer = &self.buffers[0];
let values_buffer = &self.buffers[1].as_slice();
self.validate_each_offset::<T, _>(
offset_buffer,
values_buffer.len(),
|string_index, range| {
std::str::from_utf8(&values_buffer[range.clone()]).map_err(|e| {
ArrowError::InvalidArgumentError(format!(
"Invalid UTF8 sequence at string index {} ({:?}): {}",
string_index, range, e
))
})?;
Ok(())
},
)
}
/// Ensures that all offsets in `buffers[0]` into `buffers[1]` are
/// between `0` and `offset_limit`
fn validate_offsets_full<T>(&self, offset_limit: usize) -> Result<()>
where
T: ArrowNativeType + std::convert::TryInto<usize> + num::Num + std::fmt::Display,
{
let offset_buffer = &self.buffers[0];
self.validate_each_offset::<T, _>(
offset_buffer,
offset_limit,
|_string_index, _range| {
// No validation applied to each value, but the iteration
// itself applies bounds checking to each range
Ok(())
},
)
}
/// Validates that each value in self.buffers (typed as T)
/// is within the range [0, max_value], inclusive
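    /// (null slots are skipped, since their underlying values may be arbitrary)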
fn check_bounds<T>(&self, max_value: i64) -> Result<()>
where
T: ArrowNativeType + std::convert::TryInto<i64> + num::Num + std::fmt::Display,
{
let required_len = self.len + self.offset;
let buffer = &self.buffers[0];
// This should have been checked as part of `validate()` prior
// to calling `validate_full()` but double check to be sure
assert!(buffer.len() / std::mem::size_of::<T>() >= required_len);
// Justification: buffer size was validated above
let indexes: &[T] =
unsafe { &(buffer.typed_data::<T>()[self.offset..self.offset + self.len]) };
indexes.iter().enumerate().try_for_each(|(i, &dict_index)| {
// Do not check the value is null (value can be arbitrary)
if self.is_null(i) {
return Ok(());
}
let dict_index: i64 = dict_index.try_into().map_err(|_| {
ArrowError::InvalidArgumentError(format!(
"Value at position {} out of bounds: {} (can not convert to i64)",
i, dict_index
))
})?;
if dict_index < 0 || dict_index > max_value {
return Err(ArrowError::InvalidArgumentError(format!(
"Value at position {} out of bounds: {} (should be in [0, {}])",
i, dict_index, max_value
)));
}
Ok(())
})
}
/// Returns true if this `ArrayData` is equal to `other`, using pointer comparisons
/// to determine buffer equality. This is cheaper than `PartialEq::eq` but may
/// return false when the arrays are logically equal
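    ///
    /// A minimal illustrative sketch (`make_data` is a hypothetical
    /// constructor standing in for any `ArrayData` source):
    ///
    /// ```ignore
    /// let a = make_data();
    /// let b = a.clone();      // clones share the same underlying buffers
    /// assert!(a.ptr_eq(&b));  // so the cheap pointer comparison succeeds
    ///
    /// let c = make_data();    // logically equal, but freshly allocated
    /// assert_eq!(a, c);       // PartialEq compares the actual contents
    /// assert!(!a.ptr_eq(&c)); // while ptr_eq reports false
    /// ```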
pub fn ptr_eq(&self, other: &Self) -> bool {
if self.offset != other.offset
|| self.len != other.len
|| self.null_count != other.null_count
|| self.data_type != other.data_type
|| self.buffers.len() != other.buffers.len()
|| self.child_data.len() != other.child_data.len()
{
return false;
}
match (&self.null_bitmap, &other.null_bitmap) {
(Some(a), Some(b)) if a.bits.as_ptr() != b.bits.as_ptr() => return false,
(Some(_), None) | (None, Some(_)) => return false,
_ => {}
};
if !self
.buffers
.iter()
.zip(other.buffers.iter())
.all(|(a, b)| a.as_ptr() == b.as_ptr())
{
return false;
}
self.child_data
.iter()
.zip(other.child_data.iter())
.all(|(a, b)| a.ptr_eq(b))
}
}
/// Returns the [`DataTypeLayout`] that arrays of this data
/// type are expected to have
fn layout(data_type: &DataType) -> DataTypeLayout {
// based on C/C++ implementation in
// https://github.com/apache/arrow/blob/661c7d749150905a63dd3b52e0a04dac39030d95/cpp/src/arrow/type.h (and .cc)
use std::mem::size_of;
match data_type {
DataType::Null => DataTypeLayout::new_empty(),
DataType::Boolean => DataTypeLayout {
buffers: vec![BufferSpec::BitMap],
},
DataType::Int8 => DataTypeLayout::new_fixed_width(size_of::<i8>()),
DataType::Int16 => DataTypeLayout::new_fixed_width(size_of::<i16>()),
DataType::Int32 => DataTypeLayout::new_fixed_width(size_of::<i32>()),
DataType::Int64 => DataTypeLayout::new_fixed_width(size_of::<i64>()),
DataType::UInt8 => DataTypeLayout::new_fixed_width(size_of::<u8>()),
DataType::UInt16 => DataTypeLayout::new_fixed_width(size_of::<u16>()),
DataType::UInt32 => DataTypeLayout::new_fixed_width(size_of::<u32>()),
DataType::UInt64 => DataTypeLayout::new_fixed_width(size_of::<u64>()),
DataType::Float16 => DataTypeLayout::new_fixed_width(size_of::<f16>()),
DataType::Float32 => DataTypeLayout::new_fixed_width(size_of::<f32>()),
DataType::Float64 => DataTypeLayout::new_fixed_width(size_of::<f64>()),
DataType::Timestamp(_, _) => DataTypeLayout::new_fixed_width(size_of::<i64>()),
DataType::Date32 => DataTypeLayout::new_fixed_width(size_of::<i32>()),
DataType::Date64 => DataTypeLayout::new_fixed_width(size_of::<i64>()),
DataType::Time32(_) => DataTypeLayout::new_fixed_width(size_of::<i32>()),
DataType::Time64(_) => DataTypeLayout::new_fixed_width(size_of::<i64>()),
DataType::Interval(IntervalUnit::YearMonth) => {
DataTypeLayout::new_fixed_width(size_of::<i32>())
}
DataType::Interval(IntervalUnit::DayTime) => {
DataTypeLayout::new_fixed_width(size_of::<i64>())
}
DataType::Interval(IntervalUnit::MonthDayNano) => {
DataTypeLayout::new_fixed_width(size_of::<i128>())
}
DataType::Duration(_) => DataTypeLayout::new_fixed_width(size_of::<i64>()),
DataType::Binary => DataTypeLayout::new_binary(size_of::<i32>()),
DataType::FixedSizeBinary(bytes_per_value) => {
let bytes_per_value: usize = (*bytes_per_value)
.try_into()
.expect("negative size for fixed size binary");
DataTypeLayout::new_fixed_width(bytes_per_value)
}
DataType::LargeBinary => DataTypeLayout::new_binary(size_of::<i64>()),
DataType::Utf8 => DataTypeLayout::new_binary(size_of::<i32>()),
DataType::LargeUtf8 => DataTypeLayout::new_binary(size_of::<i64>()),
DataType::List(_) => DataTypeLayout::new_fixed_width(size_of::<i32>()),
DataType::FixedSizeList(_, _) => DataTypeLayout::new_empty(), // all in child data
        DataType::LargeList(_) => DataTypeLayout::new_fixed_width(size_of::<i64>()), // LargeList uses 64-bit offsets
DataType::Struct(_) => DataTypeLayout::new_empty(), // all in child data,
DataType::Union(_, mode) => {
let type_ids = BufferSpec::FixedWidth {
byte_width: size_of::<i8>(),
};
DataTypeLayout {
buffers: match mode {
UnionMode::Sparse => {
vec![type_ids]
}
UnionMode::Dense => {
vec![
type_ids,
BufferSpec::FixedWidth {
byte_width: size_of::<i32>(),
},
]
}
},
}
}
DataType::Dictionary(key_type, _value_type) => layout(key_type),
DataType::Decimal(_, _) => {
            // Decimals are always some fixed width; the Rust implementation
            // always uses 16 bytes / size of i128
DataTypeLayout::new_fixed_width(size_of::<i128>())
}
DataType::Map(_, _) => {
// same as ListType
DataTypeLayout::new_fixed_width(size_of::<i32>())
}
}
}
/// Layout specification for a data type
#[derive(Debug, PartialEq)]
// Note: Follows structure from C++: https://github.com/apache/arrow/blob/master/cpp/src/arrow/type.h#L91
struct DataTypeLayout {
/// A vector of buffer layout specifications, one for each expected buffer
pub buffers: Vec<BufferSpec>,
}
impl DataTypeLayout {
/// Describes a basic numeric array where each element has a fixed width
pub fn new_fixed_width(byte_width: usize) -> Self {
Self {
buffers: vec![BufferSpec::FixedWidth { byte_width }],
}
}
/// Describes arrays which have no data of their own
/// (e.g. FixedSizeList). Note such arrays may still have a Null
/// Bitmap
pub fn new_empty() -> Self {
Self { buffers: vec![] }
}
    /// Describes a binary-like array: a fixed-width offset buffer of
    /// `offset_byte_width` bytes per element, followed by a variable
    /// width data buffer
pub fn new_binary(offset_byte_width: usize) -> Self {
Self {
buffers: vec![
// offsets
BufferSpec::FixedWidth {
byte_width: offset_byte_width,
},
// values
BufferSpec::VariableWidth,
],
}
}
}
/// Layout specification for a single data type buffer
#[derive(Debug, PartialEq)]
enum BufferSpec {
/// each element has a fixed width
FixedWidth { byte_width: usize },
/// Variable width, such as string data for utf8 data
VariableWidth,
/// Buffer holds a bitmap.
///
/// Note: Unlike the C++ implementation, the null/validity buffer
/// is handled specially rather than as another of the buffers in
/// the spec, so this variant is only used for the Boolean type.
BitMap,
/// Buffer is always null. Unused currently in Rust implementation,
/// (used in C++ for Union type)
#[allow(dead_code)]
AlwaysNull,
}
impl PartialEq for ArrayData {
fn eq(&self, other: &Self) -> bool {
equal(self, other)
}
}
/// Builder for `ArrayData` type
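///
/// A minimal usage sketch (illustrative; mirrors `test_builder` in the test
/// module below):
///
/// ```ignore
/// let data = ArrayData::builder(DataType::Int32)
///     .len(3)
///     .add_buffer(Buffer::from_slice_ref(&[1i32, 2, 3]))
///     .build()
///     .unwrap();
/// assert_eq!(data.len(), 3);
/// ```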
#[derive(Debug)]
pub struct ArrayDataBuilder {
data_type: DataType,
len: usize,
null_count: Option<usize>,
null_bit_buffer: Option<Buffer>,
offset: usize,
buffers: Vec<Buffer>,
child_data: Vec<ArrayData>,
}
impl ArrayDataBuilder {
#[inline]
pub const fn new(data_type: DataType) -> Self {
Self {
data_type,
len: 0,
null_count: None,
null_bit_buffer: None,
offset: 0,
buffers: vec![],
child_data: vec![],
}
}
#[inline]
#[allow(clippy::len_without_is_empty)]
pub const fn len(mut self, n: usize) -> Self {
self.len = n;
self
}
pub fn null_count(mut self, null_count: usize) -> Self {
self.null_count = Some(null_count);
self
}
pub fn null_bit_buffer(mut self, buf: Buffer) -> Self {
self.null_bit_buffer = Some(buf);
self
}
#[inline]
pub const fn offset(mut self, n: usize) -> Self {
self.offset = n;
self
}
pub fn buffers(mut self, v: Vec<Buffer>) -> Self {
self.buffers = v;
self
}
pub fn add_buffer(mut self, b: Buffer) -> Self {
self.buffers.push(b);
self
}
pub fn child_data(mut self, v: Vec<ArrayData>) -> Self {
self.child_data = v;
self
}
pub fn add_child_data(mut self, r: ArrayData) -> Self {
self.child_data.push(r);
self
}
/// Creates an array data, without any validation
///
/// # Safety
///
/// The same caveats as [`ArrayData::new_unchecked`]
/// apply.
pub unsafe fn build_unchecked(self) -> ArrayData {
ArrayData::new_unchecked(
self.data_type,
self.len,
self.null_count,
self.null_bit_buffer,
self.offset,
self.buffers,
self.child_data,
)
}
/// Creates an array data, validating all inputs
pub fn build(self) -> Result<ArrayData> {
ArrayData::try_new(
self.data_type,
self.len,
self.null_count,
self.null_bit_buffer,
self.offset,
self.buffers,
self.child_data,
)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::array::{
Array, BooleanBuilder, Int32Array, Int32Builder, Int64Array, StringArray,
StructBuilder, UInt64Array,
};
use crate::buffer::Buffer;
use crate::datatypes::Field;
use crate::util::bit_util;
#[test]
fn test_builder() {
// Buffer needs to be at least 25 long
let v = (0..25).collect::<Vec<i32>>();
let b1 = Buffer::from_slice_ref(&v);
let arr_data = ArrayData::builder(DataType::Int32)
.len(20)
.offset(5)
.add_buffer(b1)
.null_bit_buffer(Buffer::from(vec![
0b01011111, 0b10110101, 0b01100011, 0b00011110,
]))
.build()
.unwrap();
assert_eq!(20, arr_data.len());
assert_eq!(10, arr_data.null_count());
assert_eq!(5, arr_data.offset());
assert_eq!(1, arr_data.buffers().len());
assert_eq!(
Buffer::from_slice_ref(&v).as_slice(),
arr_data.buffers()[0].as_slice()
);
}
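    // An illustrative addition (not part of the original suite): `layout` is
    // private to this module, so the test module can exercise it directly.
    // Utf8 is expected to need an i32 offsets buffer followed by a
    // variable-width values buffer, i.e. `DataTypeLayout::new_binary`.
    #[test]
    fn test_layout_utf8_sketch() {
        let l = layout(&DataType::Utf8);
        assert_eq!(
            l.buffers,
            vec![
                BufferSpec::FixedWidth {
                    byte_width: std::mem::size_of::<i32>()
                },
                BufferSpec::VariableWidth,
            ]
        );
    }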
#[test]
fn test_builder_with_child_data() {
let child_arr_data = ArrayData::try_new(
DataType::Int32,
5,
Some(0),
None,
0,
vec![Buffer::from_slice_ref(&[1i32, 2, 3, 4, 5])],
vec![],
)
.unwrap();
let data_type = DataType::Struct(vec![Field::new("x", DataType::Int32, true)]);
let arr_data = ArrayData::builder(data_type)
.len(5)
.offset(0)
.add_child_data(child_arr_data.clone())
.build()
.unwrap();
assert_eq!(5, arr_data.len());
assert_eq!(1, arr_data.child_data().len());
assert_eq!(child_arr_data, arr_data.child_data()[0]);
}
#[test]
fn test_null_count() {
let mut bit_v: [u8; 2] = [0; 2];
bit_util::set_bit(&mut bit_v, 0);
bit_util::set_bit(&mut bit_v, 3);
bit_util::set_bit(&mut bit_v, 10);
let arr_data = ArrayData::builder(DataType::Int32)
.len(16)
.add_buffer(make_i32_buffer(16))
.null_bit_buffer(Buffer::from(bit_v))
.build()
.unwrap();
assert_eq!(13, arr_data.null_count());
// Test with offset
let mut bit_v: [u8; 2] = [0; 2];
bit_util::set_bit(&mut bit_v, 0);
bit_util::set_bit(&mut bit_v, 3);
bit_util::set_bit(&mut bit_v, 10);
let arr_data = ArrayData::builder(DataType::Int32)
.len(12)
.offset(2)
            .add_buffer(make_i32_buffer(14)) // requires at least 14 values (len 12 + offset 2)
.null_bit_buffer(Buffer::from(bit_v))
.build()
.unwrap();
assert_eq!(10, arr_data.null_count());
}
#[test]
fn test_null_buffer_ref() {
let mut bit_v: [u8; 2] = [0; 2];
bit_util::set_bit(&mut bit_v, 0);
bit_util::set_bit(&mut bit_v, 3);
bit_util::set_bit(&mut bit_v, 10);
let arr_data = ArrayData::builder(DataType::Int32)
.len(16)
.add_buffer(make_i32_buffer(16))
.null_bit_buffer(Buffer::from(bit_v))
.build()
.unwrap();
assert!(arr_data.null_buffer().is_some());
assert_eq!(&bit_v, arr_data.null_buffer().unwrap().as_slice());
}
#[test]
fn test_slice() {
let mut bit_v: [u8; 2] = [0; 2];
bit_util::set_bit(&mut bit_v, 0);
bit_util::set_bit(&mut bit_v, 3);
bit_util::set_bit(&mut bit_v, 10);
let data = ArrayData::builder(DataType::Int32)
.len(16)
.add_buffer(make_i32_buffer(16))
.null_bit_buffer(Buffer::from(bit_v))
.build()
.unwrap();
let new_data = data.slice(1, 15);
assert_eq!(data.len() - 1, new_data.len());
assert_eq!(1, new_data.offset());
assert_eq!(data.null_count(), new_data.null_count());
// slice of a slice (removes one null)
let new_data = new_data.slice(1, 14);
assert_eq!(data.len() - 2, new_data.len());
assert_eq!(2, new_data.offset());
assert_eq!(data.null_count() - 1, new_data.null_count());
}
#[test]
fn test_equality() {
let int_data = ArrayData::builder(DataType::Int32)
.len(1)
.add_buffer(make_i32_buffer(1))
.build()
.unwrap();
let float_data = ArrayData::builder(DataType::Float32)
.len(1)
.add_buffer(make_f32_buffer(1))
.build()
.unwrap();
assert_ne!(int_data, float_data);
assert!(!int_data.ptr_eq(&float_data));
assert!(int_data.ptr_eq(&int_data));
let int_data_clone = int_data.clone();
assert_eq!(int_data, int_data_clone);
assert!(int_data.ptr_eq(&int_data_clone));
assert!(int_data_clone.ptr_eq(&int_data));
let int_data_slice = int_data_clone.slice(1, 0);
assert!(int_data_slice.ptr_eq(&int_data_slice));
assert!(!int_data.ptr_eq(&int_data_slice));
assert!(!int_data_slice.ptr_eq(&int_data));
let data_buffer = Buffer::from_slice_ref(&"abcdef".as_bytes());
let offsets_buffer = Buffer::from_slice_ref(&[0_i32, 2_i32, 2_i32, 5_i32]);
let string_data = ArrayData::try_new(
DataType::Utf8,
3,
Some(1),
Some(Buffer::from_iter(vec![true, false, true])),
0,
vec![offsets_buffer, data_buffer],
vec![],
)
.unwrap();
assert_ne!(float_data, string_data);
assert!(!float_data.ptr_eq(&string_data));
assert!(string_data.ptr_eq(&string_data));
let string_data_cloned = string_data.clone();
assert!(string_data_cloned.ptr_eq(&string_data));
assert!(string_data.ptr_eq(&string_data_cloned));
let string_data_slice = string_data.slice(1, 2);
assert!(string_data_slice.ptr_eq(&string_data_slice));
assert!(!string_data_slice.ptr_eq(&string_data))
}
#[test]
fn test_count_nulls() {
let null_buffer = Some(Buffer::from(vec![0b00010110, 0b10011111]));
let count = count_nulls(null_buffer.as_ref(), 0, 16);
assert_eq!(count, 7);
let count = count_nulls(null_buffer.as_ref(), 4, 8);
assert_eq!(count, 3);
}
#[test]
#[should_panic(
expected = "Need at least 80 bytes in buffers[0] in array of type Int64, but got 8"
)]
fn test_buffer_too_small() {
let buffer = Buffer::from_slice_ref(&[0i32, 2i32]);
        // should fail as the declared size (10*8 = 80) is larger than the underlying buffer (8)
ArrayData::try_new(DataType::Int64, 10, Some(0), None, 0, vec![buffer], vec![])
.unwrap();
}
#[test]
#[should_panic(
expected = "Need at least 16 bytes in buffers[0] in array of type Int64, but got 8"
)]
fn test_buffer_too_small_offset() {
let buffer = Buffer::from_slice_ref(&[0i32, 2i32]);
// should fail -- size is ok, but also has offset
ArrayData::try_new(DataType::Int64, 1, Some(0), None, 1, vec![buffer], vec![])
.unwrap();
}
#[test]
#[should_panic(expected = "Expected 1 buffers in array of type Int64, got 2")]
fn test_bad_number_of_buffers() {
let buffer1 = Buffer::from_slice_ref(&[0i32, 2i32]);
let buffer2 = Buffer::from_slice_ref(&[0i32, 2i32]);
ArrayData::try_new(
DataType::Int64,
1,
Some(0),
None,
0,
vec![buffer1, buffer2],
vec![],
)
.unwrap();
}
#[test]
#[should_panic(expected = "integer overflow computing min buffer size")]
fn test_fixed_width_overflow() {
let buffer = Buffer::from_slice_ref(&[0i32, 2i32]);
ArrayData::try_new(
DataType::Int64,
usize::MAX,
Some(0),
None,
0,
vec![buffer],
vec![],
)
.unwrap();
}
#[test]
#[should_panic(expected = "null_bit_buffer size too small. got 1 needed 2")]
fn test_bitmap_too_small() {
let buffer = make_i32_buffer(9);
let null_bit_buffer = Buffer::from(vec![0b11111111]);
ArrayData::try_new(
DataType::Int32,
9,
Some(0),
Some(null_bit_buffer),
0,
vec![buffer],
vec![],
)
.unwrap();
}
#[test]
#[should_panic(expected = "null_count 3 for an array exceeds length of 2 elements")]
fn test_bad_null_count() {
let buffer = Buffer::from_slice_ref(&[0i32, 2i32]);
ArrayData::try_new(DataType::Int32, 2, Some(3), None, 0, vec![buffer], vec![])
.unwrap();
}
    // Test creating a dictionary with a non-integer type
#[test]
#[should_panic(expected = "Dictionary values must be integer, but was Utf8")]
fn test_non_int_dictionary() {
let i32_buffer = Buffer::from_slice_ref(&[0i32, 2i32]);
let data_type =
DataType::Dictionary(Box::new(DataType::Utf8), Box::new(DataType::Int32));
let child_data = ArrayData::try_new(
DataType::Int32,
1,
Some(0),
None,
0,
vec![i32_buffer.clone()],
vec![],
)
.unwrap();
ArrayData::try_new(
data_type,
1,
Some(0),
None,
0,
vec![i32_buffer.clone(), i32_buffer],
vec![child_data],
)
.unwrap();
}
#[test]
#[should_panic(expected = "Expected LargeUtf8 but child data had Utf8")]
fn test_mismatched_dictionary_types() {
// test w/ dictionary created with a child array data that has type different than declared
let string_array: StringArray =
vec![Some("foo"), Some("bar")].into_iter().collect();
let i32_buffer = Buffer::from_slice_ref(&[0i32, 1i32]);
// Dict says LargeUtf8 but array is Utf8
let data_type = DataType::Dictionary(
Box::new(DataType::Int32),
Box::new(DataType::LargeUtf8),
);
let child_data = string_array.data().clone();
ArrayData::try_new(
data_type,
1,
Some(0),
None,
0,
vec![i32_buffer],
vec![child_data],
)
.unwrap();
}
#[test]
#[should_panic(
expected = "Offsets buffer size (bytes): 8 isn't large enough for Utf8. Length 2 needs 3"
)]
fn test_validate_offsets_i32() {
let data_buffer = Buffer::from_slice_ref(&"abcdef".as_bytes());
let offsets_buffer = Buffer::from_slice_ref(&[0i32, 2i32]);
ArrayData::try_new(
DataType::Utf8,
2,
None,
None,
0,
vec![offsets_buffer, data_buffer],
vec![],
)
.unwrap();
}
#[test]
#[should_panic(
expected = "Offsets buffer size (bytes): 16 isn't large enough for LargeUtf8. Length 2 needs 3"
)]
fn test_validate_offsets_i64() {
let data_buffer = Buffer::from_slice_ref(&"abcdef".as_bytes());
let offsets_buffer = Buffer::from_slice_ref(&[0i64, 2i64]);
ArrayData::try_new(
DataType::LargeUtf8,
2,
None,
None,
0,
vec![offsets_buffer, data_buffer],
vec![],
)
.unwrap();
}
#[test]
#[should_panic(expected = "Error converting offset[0] (-2) to usize for Utf8")]
fn test_validate_offsets_negative_first_i32() {
let data_buffer = Buffer::from_slice_ref(&"abcdef".as_bytes());
let offsets_buffer = Buffer::from_slice_ref(&[-2i32, 1i32, 3i32]);
ArrayData::try_new(
DataType::Utf8,
2,
None,
None,
0,
vec![offsets_buffer, data_buffer],
vec![],
)
.unwrap();
}
#[test]
#[should_panic(expected = "Error converting offset[2] (-3) to usize for Utf8")]
fn test_validate_offsets_negative_last_i32() {
let data_buffer = Buffer::from_slice_ref(&"abcdef".as_bytes());
let offsets_buffer = Buffer::from_slice_ref(&[0i32, 2i32, -3i32]);
ArrayData::try_new(
DataType::Utf8,
2,
None,
None,
0,
vec![offsets_buffer, data_buffer],
vec![],
)
.unwrap();
}
#[test]
#[should_panic(expected = "First offset 4 in Utf8 is smaller than last offset 3")]
fn test_validate_offsets_range_too_small() {
let data_buffer = Buffer::from_slice_ref(&"abcdef".as_bytes());
// start offset is larger than end
let offsets_buffer = Buffer::from_slice_ref(&[4i32, 2i32, 3i32]);
ArrayData::try_new(
DataType::Utf8,
2,
None,
None,
0,
vec![offsets_buffer, data_buffer],
vec![],
)
.unwrap();
}
#[test]
#[should_panic(expected = "Last offset 10 of Utf8 is larger than values length 6")]
fn test_validate_offsets_range_too_large() {
let data_buffer = Buffer::from_slice_ref(&"abcdef".as_bytes());
// 10 is off the end of the buffer
let offsets_buffer = Buffer::from_slice_ref(&[0i32, 2i32, 10i32]);
ArrayData::try_new(
DataType::Utf8,
2,
None,
None,
0,
vec![offsets_buffer, data_buffer],
vec![],
)
.unwrap();
}
#[test]
#[should_panic(expected = "First offset 10 of Utf8 is larger than values length 6")]
fn test_validate_offsets_first_too_large() {
let data_buffer = Buffer::from_slice_ref(&"abcdef".as_bytes());
// 10 is off the end of the buffer
let offsets_buffer = Buffer::from_slice_ref(&[10i32, 2i32, 10i32]);
ArrayData::try_new(
DataType::Utf8,
2,
None,
None,
0,
vec![offsets_buffer, data_buffer],
vec![],
)
.unwrap();
}
#[test]
fn test_validate_offsets_first_too_large_skipped() {
let data_buffer = Buffer::from_slice_ref(&"abcdef".as_bytes());
// 10 is off the end of the buffer, but offset starts at 1 so it is skipped
let offsets_buffer = Buffer::from_slice_ref(&[10i32, 2i32, 3i32, 4i32]);
let data = ArrayData::try_new(
DataType::Utf8,
2,
None,
None,
1,
vec![offsets_buffer, data_buffer],
vec![],
)
.unwrap();
let array: StringArray = data.into();
let expected: StringArray = vec![Some("c"), Some("d")].into_iter().collect();
assert_eq!(array, expected);
}
#[test]
#[should_panic(expected = "Last offset 8 of Utf8 is larger than values length 6")]
fn test_validate_offsets_last_too_large() {
let data_buffer = Buffer::from_slice_ref(&"abcdef".as_bytes());
        // 7 and 8 are off the end of the buffer
let offsets_buffer = Buffer::from_slice_ref(&[5i32, 7i32, 8i32]);
ArrayData::try_new(
DataType::Utf8,
2,
None,
None,
0,
vec![offsets_buffer, data_buffer],
vec![],
)
.unwrap();
}
#[test]
#[should_panic(
expected = "Values length 4 is less than the length (2) multiplied by the value size (2) for FixedSizeList"
)]
fn test_validate_fixed_size_list() {
// child has 4 elements,
let child_array = vec![Some(1), Some(2), Some(3), None]
.into_iter()
.collect::<Int32Array>();
        // but claim we have 3 elements for a fixed size of 2
let field = Field::new("field", DataType::Int32, true);
ArrayData::try_new(
DataType::FixedSizeList(Box::new(field), 2),
3,
None,
None,
0,
vec![],
vec![child_array.data().clone()],
)
.unwrap();
}
#[test]
#[should_panic(expected = "Child type mismatch for Struct")]
fn test_validate_struct_child_type() {
let field1 = vec![Some(1), Some(2), Some(3), None]
.into_iter()
.collect::<Int32Array>();
        // validate that the type of struct fields matches child fields
ArrayData::try_new(
DataType::Struct(vec![Field::new("field1", DataType::Int64, true)]),
3,
None,
None,
0,
vec![],
vec![field1.data().clone()],
)
.unwrap();
}
#[test]
#[should_panic(
expected = "child array #0 for field field1 has length smaller than expected for struct array (4 < 6)"
)]
fn test_validate_struct_child_length() {
// field length only has 4 items, but array claims to have 6
let field1 = vec![Some(1), Some(2), Some(3), None]
.into_iter()
.collect::<Int32Array>();
ArrayData::try_new(
DataType::Struct(vec![Field::new("field1", DataType::Int32, true)]),
6,
None,
None,
0,
vec![],
vec![field1.data().clone()],
)
.unwrap();
}
/// Test that the array of type `data_type` that has invalid utf8 data errors
fn check_utf8_validation<T: ArrowNativeType>(data_type: DataType) {
        // 0x80 is a utf8 continuation byte and is not a valid utf8 sequence on its own
let data_buffer = Buffer::from_slice_ref(&[b'a', b'a', 0x80, 0x00]);
let offsets: Vec<T> = [0, 2, 3]
.iter()
.map(|&v| T::from_usize(v).unwrap())
.collect();
let offsets_buffer = Buffer::from_slice_ref(&offsets);
ArrayData::try_new(
data_type,
2,
None,
None,
0,
vec![offsets_buffer, data_buffer],
vec![],
)
.unwrap();
}
#[test]
#[should_panic(expected = "Invalid UTF8 sequence at string index 1 (2..3)")]
fn test_validate_utf8_content() {
check_utf8_validation::<i32>(DataType::Utf8);
}
#[test]
#[should_panic(expected = "Invalid UTF8 sequence at string index 1 (2..3)")]
fn test_validate_large_utf8_content() {
check_utf8_validation::<i64>(DataType::LargeUtf8);
}
/// Test that the array of type `data_type` that has invalid indexes (out of bounds)
fn check_index_out_of_bounds_validation<T: ArrowNativeType>(data_type: DataType) {
let data_buffer = Buffer::from_slice_ref(&[b'a', b'b', b'c', b'd']);
        // First three offsets are fine, then 5 is out of bounds
let offsets: Vec<T> = [0, 1, 2, 5, 2]
.iter()
.map(|&v| T::from_usize(v).unwrap())
.collect();
let offsets_buffer = Buffer::from_slice_ref(&offsets);
ArrayData::try_new(
data_type,
4,
None,
None,
0,
vec![offsets_buffer, data_buffer],
vec![],
)
.unwrap();
}
#[test]
#[should_panic(
expected = "Offset invariant failure: offset for slot 2 out of bounds: 5 > 4"
)]
fn test_validate_utf8_out_of_bounds() {
check_index_out_of_bounds_validation::<i32>(DataType::Utf8);
}
#[test]
#[should_panic(
expected = "Offset invariant failure: offset for slot 2 out of bounds: 5 > 4"
)]
fn test_validate_large_utf8_out_of_bounds() {
check_index_out_of_bounds_validation::<i64>(DataType::LargeUtf8);
}
#[test]
#[should_panic(
expected = "Offset invariant failure: offset for slot 2 out of bounds: 5 > 4"
)]
fn test_validate_binary_out_of_bounds() {
check_index_out_of_bounds_validation::<i32>(DataType::Binary);
}
#[test]
#[should_panic(
expected = "Offset invariant failure: offset for slot 2 out of bounds: 5 > 4"
)]
fn test_validate_large_binary_out_of_bounds() {
check_index_out_of_bounds_validation::<i64>(DataType::LargeBinary);
}
    // Validate that indexes don't go backwards: check offsets that decrease
fn check_index_backwards_validation<T: ArrowNativeType>(data_type: DataType) {
let data_buffer = Buffer::from_slice_ref(&[b'a', b'b', b'c', b'd']);
        // First four offsets are fine, then 1 goes backwards
let offsets: Vec<T> = [0, 1, 2, 2, 1]
.iter()
.map(|&v| T::from_usize(v).unwrap())
.collect();
let offsets_buffer = Buffer::from_slice_ref(&offsets);
ArrayData::try_new(
data_type,
4,
None,
None,
0,
vec![offsets_buffer, data_buffer],
vec![],
)
.unwrap();
}
#[test]
#[should_panic(
expected = "Offset invariant failure: non-monotonic offset at slot 3: 2 > 1"
)]
fn test_validate_utf8_index_backwards() {
check_index_backwards_validation::<i32>(DataType::Utf8);
}
#[test]
#[should_panic(
expected = "Offset invariant failure: non-monotonic offset at slot 3: 2 > 1"
)]
fn test_validate_large_utf8_index_backwards() {
check_index_backwards_validation::<i64>(DataType::LargeUtf8);
}
#[test]
#[should_panic(
expected = "Offset invariant failure: non-monotonic offset at slot 3: 2 > 1"
)]
fn test_validate_binary_index_backwards() {
check_index_backwards_validation::<i32>(DataType::Binary);
}
#[test]
#[should_panic(
expected = "Offset invariant failure: non-monotonic offset at slot 3: 2 > 1"
)]
fn test_validate_large_binary_index_backwards() {
check_index_backwards_validation::<i64>(DataType::LargeBinary);
}
#[test]
#[should_panic(
expected = "Value at position 1 out of bounds: 3 (should be in [0, 1])"
)]
fn test_validate_dictionary_index_too_large() {
let values: StringArray = [Some("foo"), Some("bar")].into_iter().collect();
// 3 is not a valid index into the values (only 0 and 1)
let keys: Int32Array = [Some(1), Some(3)].into_iter().collect();
let data_type = DataType::Dictionary(
Box::new(keys.data_type().clone()),
Box::new(values.data_type().clone()),
);
ArrayData::try_new(
data_type,
2,
None,
None,
0,
vec![keys.data().buffers[0].clone()],
vec![values.data().clone()],
)
.unwrap();
}
#[test]
#[should_panic(
expected = "Value at position 1 out of bounds: -1 (should be in [0, 1]"
)]
fn test_validate_dictionary_index_negative() {
let values: StringArray = [Some("foo"), Some("bar")].into_iter().collect();
// -1 is not a valid index at all!
let keys: Int32Array = [Some(1), Some(-1)].into_iter().collect();
let data_type = DataType::Dictionary(
Box::new(keys.data_type().clone()),
Box::new(values.data_type().clone()),
);
ArrayData::try_new(
data_type,
2,
None,
None,
0,
vec![keys.data().buffers[0].clone()],
vec![values.data().clone()],
)
.unwrap();
}
#[test]
fn test_validate_dictionary_index_negative_but_not_referenced() {
let values: StringArray = [Some("foo"), Some("bar")].into_iter().collect();
// -1 is not a valid index at all, but the array is length 1
// so the -1 should not be looked at
let keys: Int32Array = [Some(1), Some(-1)].into_iter().collect();
let data_type = DataType::Dictionary(
Box::new(keys.data_type().clone()),
Box::new(values.data_type().clone()),
);
// Expect this not to panic
ArrayData::try_new(
data_type,
1,
None,
None,
0,
vec![keys.data().buffers[0].clone()],
vec![values.data().clone()],
)
.unwrap();
}
#[test]
#[should_panic(
expected = "Value at position 0 out of bounds: 18446744073709551615 (can not convert to i64)"
)]
fn test_validate_dictionary_index_giant_negative() {
let values: StringArray = [Some("foo"), Some("bar")].into_iter().collect();
        // u64::MAX cannot be converted to i64, so it can never be a valid index
let keys: UInt64Array = [Some(u64::MAX), Some(1)].into_iter().collect();
let data_type = DataType::Dictionary(
Box::new(keys.data_type().clone()),
Box::new(values.data_type().clone()),
);
ArrayData::try_new(
data_type,
2,
None,
None,
0,
vec![keys.data().buffers[0].clone()],
vec![values.data().clone()],
)
.unwrap();
}
/// Test that the list of type `data_type` generates correct offset out of bounds errors
fn check_list_offsets<T: ArrowNativeType>(data_type: DataType) {
let values: Int32Array =
[Some(1), Some(2), Some(3), Some(4)].into_iter().collect();
        // 5 is an invalid offset into a list of only four values
let offsets: Vec<T> = [0, 2, 5, 4]
.iter()
.map(|&v| T::from_usize(v).unwrap())
.collect();
let offsets_buffer = Buffer::from_slice_ref(&offsets);
ArrayData::try_new(
data_type,
3,
None,
None,
0,
vec![offsets_buffer],
vec![values.data().clone()],
)
.unwrap();
}
#[test]
#[should_panic(
expected = "Offset invariant failure: offset for slot 1 out of bounds: 5 > 4"
)]
fn test_validate_list_offsets() {
let field_type = Field::new("f", DataType::Int32, true);
check_list_offsets::<i32>(DataType::List(Box::new(field_type)));
}
#[test]
#[should_panic(
expected = "Offset invariant failure: offset for slot 1 out of bounds: 5 > 4"
)]
fn test_validate_large_list_offsets() {
let field_type = Field::new("f", DataType::Int32, true);
check_list_offsets::<i64>(DataType::LargeList(Box::new(field_type)));
}
/// Test that the list of type `data_type` generates correct errors for negative offsets
#[test]
#[should_panic(
expected = "Offset invariant failure: Could not convert end_offset -1 to usize in slot 2"
)]
fn test_validate_list_negative_offsets() {
let values: Int32Array =
[Some(1), Some(2), Some(3), Some(4)].into_iter().collect();
let field_type = Field::new("f", values.data_type().clone(), true);
let data_type = DataType::List(Box::new(field_type));
// -1 is an invalid offset any way you look at it
let offsets: Vec<i32> = vec![0, 2, -1, 4];
let offsets_buffer = Buffer::from_slice_ref(&offsets);
ArrayData::try_new(
data_type,
3,
None,
None,
0,
vec![offsets_buffer],
vec![values.data().clone()],
)
.unwrap();
}
#[test]
#[should_panic(
expected = "child #0 invalid: Invalid argument error: Value at position 1 out of bounds: -1 (should be in [0, 1])"
)]
/// test that children are validated recursively (aka bugs in child data of struct also are flagged)
fn test_validate_recursive() {
// Form invalid dictionary array
let values: StringArray = [Some("foo"), Some("bar")].into_iter().collect();
// -1 is not a valid index
let keys: Int32Array = [Some(1), Some(-1), Some(1)].into_iter().collect();
let dict_data_type = DataType::Dictionary(
Box::new(keys.data_type().clone()),
Box::new(values.data_type().clone()),
);
// purposely create an invalid child data
let dict_data = unsafe {
ArrayData::new_unchecked(
dict_data_type,
2,
None,
None,
0,
vec![keys.data().buffers[0].clone()],
vec![values.data().clone()],
)
};
// Now, try and create a struct with this invalid child data (and expect an error)
let data_type =
DataType::Struct(vec![Field::new("d", dict_data.data_type().clone(), true)]);
ArrayData::try_new(data_type, 1, None, None, 0, vec![], vec![dict_data]).unwrap();
}
/// returns a buffer initialized with some constant value for tests
fn make_i32_buffer(n: usize) -> Buffer {
Buffer::from_slice_ref(&vec![42i32; n])
}
/// returns a buffer initialized with some constant value for tests
fn make_f32_buffer(n: usize) -> Buffer {
Buffer::from_slice_ref(&vec![42f32; n])
}
#[test]
#[should_panic(expected = "Expected Int64 but child data had Int32")]
fn test_validate_union_different_types() {
let field1 = vec![Some(1), Some(2)].into_iter().collect::<Int32Array>();
let field2 = vec![Some(1), Some(2)].into_iter().collect::<Int32Array>();
let type_ids = Buffer::from_slice_ref(&[0i8, 1i8]);
ArrayData::try_new(
DataType::Union(
vec![
Field::new("field1", DataType::Int32, true),
Field::new("field2", DataType::Int64, true), // data is int32
],
UnionMode::Sparse,
),
2,
None,
None,
0,
vec![type_ids],
vec![field1.data().clone(), field2.data().clone()],
)
.unwrap();
}
// sparse with wrong sized children
#[test]
#[should_panic(
expected = "Sparse union child array #1 has length smaller than expected for union array (1 < 2)"
)]
fn test_validate_union_sparse_different_child_len() {
let field1 = vec![Some(1), Some(2)].into_iter().collect::<Int32Array>();
// field 2 only has 1 item but array should have 2
let field2 = vec![Some(1)].into_iter().collect::<Int64Array>();
let type_ids = Buffer::from_slice_ref(&[0i8, 1i8]);
ArrayData::try_new(
DataType::Union(
vec![
Field::new("field1", DataType::Int32, true),
Field::new("field2", DataType::Int64, true),
],
UnionMode::Sparse,
),
2,
None,
None,
0,
vec![type_ids],
vec![field1.data().clone(), field2.data().clone()],
)
.unwrap();
}
#[test]
#[should_panic(expected = "Expected 2 buffers in array of type Union")]
fn test_validate_union_dense_without_offsets() {
let field1 = vec![Some(1), Some(2)].into_iter().collect::<Int32Array>();
let field2 = vec![Some(1)].into_iter().collect::<Int64Array>();
let type_ids = Buffer::from_slice_ref(&[0i8, 1i8]);
ArrayData::try_new(
DataType::Union(
vec![
Field::new("field1", DataType::Int32, true),
Field::new("field2", DataType::Int64, true),
],
UnionMode::Dense,
),
2,
None,
None,
0,
vec![type_ids], // need offsets buffer here too
vec![field1.data().clone(), field2.data().clone()],
)
.unwrap();
}
#[test]
#[should_panic(
expected = "Need at least 8 bytes in buffers[1] in array of type Union"
)]
fn test_validate_union_dense_with_bad_len() {
let field1 = vec![Some(1), Some(2)].into_iter().collect::<Int32Array>();
let field2 = vec![Some(1)].into_iter().collect::<Int64Array>();
let type_ids = Buffer::from_slice_ref(&[0i8, 1i8]);
let offsets = Buffer::from_slice_ref(&[0i32]); // should have 2 offsets, but only have 1
ArrayData::try_new(
DataType::Union(
vec![
Field::new("field1", DataType::Int32, true),
Field::new("field2", DataType::Int64, true),
],
UnionMode::Dense,
),
2,
None,
None,
0,
vec![type_ids, offsets],
vec![field1.data().clone(), field2.data().clone()],
)
.unwrap();
}
#[test]
fn test_try_new_sliced_struct() {
let mut builder = StructBuilder::new(
vec![
Field::new("a", DataType::Int32, true),
Field::new("b", DataType::Boolean, true),
],
vec![
Box::new(Int32Builder::new(5)),
Box::new(BooleanBuilder::new(5)),
],
);
// struct[0] = { a: 10, b: true }
builder
.field_builder::<Int32Builder>(0)
.unwrap()
.append_option(Some(10))
.unwrap();
builder
.field_builder::<BooleanBuilder>(1)
.unwrap()
.append_option(Some(true))
.unwrap();
builder.append(true).unwrap();
// struct[1] = null
builder
.field_builder::<Int32Builder>(0)
.unwrap()
.append_option(None)
.unwrap();
builder
.field_builder::<BooleanBuilder>(1)
.unwrap()
.append_option(None)
.unwrap();
builder.append(false).unwrap();
// struct[2] = { a: null, b: false }
builder
.field_builder::<Int32Builder>(0)
.unwrap()
.append_option(None)
.unwrap();
builder
.field_builder::<BooleanBuilder>(1)
.unwrap()
.append_option(Some(false))
.unwrap();
builder.append(true).unwrap();
// struct[3] = { a: 21, b: null }
builder
.field_builder::<Int32Builder>(0)
.unwrap()
.append_option(Some(21))
.unwrap();
builder
.field_builder::<BooleanBuilder>(1)
.unwrap()
.append_option(None)
.unwrap();
builder.append(true).unwrap();
// struct[4] = { a: 18, b: false }
builder
.field_builder::<Int32Builder>(0)
.unwrap()
.append_option(Some(18))
.unwrap();
builder
.field_builder::<BooleanBuilder>(1)
.unwrap()
.append_option(Some(false))
.unwrap();
builder.append(true).unwrap();
let struct_array = builder.finish();
let struct_array_slice = struct_array.slice(1, 3);
let struct_array_data = struct_array_slice.data();
let cloned_data = ArrayData::try_new(
struct_array_slice.data_type().clone(),
struct_array_slice.len(),
None, // force new to compute the number of null bits
struct_array_data.null_buffer().cloned(),
struct_array_slice.offset(),
struct_array_data.buffers().to_vec(),
struct_array_data.child_data().to_vec(),
)
.unwrap();
let cloned = crate::array::make_array(cloned_data);
assert_eq!(&struct_array_slice, &cloned);
}
}
| 34.562741 | 122 | 0.535348 |
3340c3a731de18980a82f94ff150eebec5225759 | 9,928 | use crate::{
state::{
middleware::localize::LocalizeStore,
ChangeLastSelectedCurrency, CosterAction, StateCallback, StateStoreRef,
},
AppRoute,
};
use anyhow::anyhow;
use commodity::CommodityType;
use costing::Tab;
use form_validation::{
concat_results, Validatable, Validation, ValidationError, ValidationErrors, Validator, ValidatorFn, AsyncValidator,
};
use log::error;
use std::{fmt::Display, rc::Rc};
use tr::tr;
use uuid::Uuid;
use yew::{html, Component, ComponentLink, Html, Properties, ShouldRender};
use yew_bulma::components::form::{Form, FormFieldLink, FieldKey, input_field::TextInput, select_field::SelectField, FieldMsg};
use switch_router_middleware::RouteStore;
#[derive(PartialEq, Clone, Copy, Hash, Eq, Debug)]
enum FormFields {
Name,
WorkingCurrency,
// Participant(u32),
}
impl FieldKey for FormFields {}
impl Display for FormFields {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", self)
}
}
#[derive(Debug)]
pub struct FormData {
pub name: String,
pub working_currency: Option<CommodityType>,
}
impl FormData {
pub fn create_tab(&self) -> Result<Tab, anyhow::Error> {
self.validate()
.map_err(|e| anyhow!("error validating FormData: {}", e))?;
let working_currency_id = match &self.working_currency {
Some(working_currency) => working_currency.id,
None => return Err(anyhow!("empty working_currency in FormData")),
};
Ok(Tab::new(
Uuid::new_v4(),
self.name.clone(),
working_currency_id,
Vec::new(),
Vec::new(),
))
}
}
impl FormData {
fn name_validator() -> Validator<String, FormFields> {
Validator::new().validation(ValidatorFn::new(|name_value: &String, _| {
if name_value.trim().is_empty() {
Err(ValidationError::new(FormFields::Name, "coster::costing_tab::field_is_empty")
.with_message(|key| tr!("{0} cannot be empty", key)).into())
} else {
Ok(())
}
}))
}
fn working_currency_validator() -> Validator<Option<CommodityType>, FormFields> {
Validator::new().validation(ValidatorFn::new(|working_currency: &Option<CommodityType>, _| {
if working_currency.is_none() {
Err(ValidationError::new(FormFields::WorkingCurrency, "coster::costing_tab::working_currency_not_selected")
.with_message(|_| tr!("Please select a working currency")).into())
} else {
Ok(())
}
}))
}
}
impl Validatable<FormFields> for FormData {
fn validate(&self) -> Result<(), ValidationErrors<FormFields>> {
concat_results(vec![
Self::name_validator()
.validate_value(&self.name, &FormFields::Name),
Self::working_currency_validator()
.validate_value(&self.working_currency, &FormFields::WorkingCurrency),
])
}
}
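// An illustrative sketch of how the validators compose (hedged: the exact
// `ValidationErrors` API comes from the `form_validation` crate): calling
// `FormData::default().validate()` runs both field validators via
// `concat_results`, so the returned `Err(ValidationErrors)` aggregates one
// error keyed by `FormFields::Name` (empty name) and one keyed by
// `FormFields::WorkingCurrency` (no currency selected).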
impl Default for FormData {
fn default() -> Self {
Self {
name: "".to_string(),
working_currency: None,
}
}
}
pub struct NewCostingTab {
form_data: FormData,
props: Props,
currencies: Vec<CommodityType>,
link: ComponentLink<Self>,
form_field_link: FormFieldLink<FormFields>,
form_is_valid: bool,
_language_changed_callback: StateCallback,
}
#[derive(Clone)]
pub enum Msg {
UpdateName(String),
UpdateWorkingCurrency(CommodityType),
UpdateFormIsValid(bool),
Create,
Cancel,
LanguageChanged,
}
#[derive(Clone, Properties, PartialEq)]
pub struct Props {
pub state_store: StateStoreRef,
}
impl Component for NewCostingTab {
type Message = Msg;
type Properties = Props;
fn create(props: Props, link: ComponentLink<Self>) -> Self {
let mut currencies = commodity::all_iso4217_currencies();
currencies.sort_by(|a, b| a.id.cmp(&b.id));
let callback = props
.state_store
.subscribe_language_changed(&link, Msg::LanguageChanged);
let mut form_data = FormData::default();
form_data.working_currency = props.state_store.state().last_selected_currency.clone();
NewCostingTab {
form_data,
props,
currencies,
link,
// Form is displayed as valid (until validations arrive)
form_is_valid: true,
form_field_link: FormFieldLink::new(),
_language_changed_callback: callback,
}
}
fn update(&mut self, msg: Msg) -> ShouldRender {
match msg {
Msg::UpdateName(name) => {
self.form_data.name = name.trim().to_string();
true
}
Msg::UpdateWorkingCurrency(working_currency) => {
self.form_data.working_currency = Some(working_currency);
true
}
Msg::Create => {
// Trigger all the fields to display their validations.
self.form_field_link.send_all_fields_message(FieldMsg::Validate);
self.form_is_valid = self.form_data.validate().is_ok();
if self.form_is_valid {
self.props.state_store.dispatch(ChangeLastSelectedCurrency {
last_selected_currency: self.form_data.working_currency.clone(),
write_to_database: true,
});
let tab = match self.form_data.create_tab() {
Ok(tab) => tab,
Err(err) => {
error!("{}", err);
return false;
}
};
self.props.state_store.dispatch(CosterAction::CreateTab {
tab: Rc::new(tab),
write_to_database: true,
});
self.props.state_store.change_route(AppRoute::Index);
}
true
}
Msg::Cancel => {
self.props.state_store.change_route(AppRoute::Index);
true
}
Msg::LanguageChanged => true,
Msg::UpdateFormIsValid(is_valid) => {
self.form_is_valid = is_valid;
true
}
}
}
fn change(&mut self, props: Self::Properties) -> ShouldRender {
if self.props != props {
self.props = props;
true
} else {
false
}
}
fn view(&self) -> Html {
let onclick_cancel = self.link.callback(|_| Msg::Cancel);
let onclick_submit = self.link.callback(|_| Msg::Create);
let onupdate_working_currency = self.link.callback(Msg::UpdateWorkingCurrency);
let onupdate_name = self
.link
.callback(Msg::UpdateName);
let onvalidateupdate = self
.link
.callback(|errors: ValidationErrors<FormFields>| Msg::UpdateFormIsValid(errors.is_empty()));
let tab_name_label = tr!("Tab Name");
let name_validator: AsyncValidator<String, FormFields> = FormData::name_validator().into();
let working_currency_validator: AsyncValidator<Option<CommodityType>, FormFields> = FormData::working_currency_validator().into();
html! {
<>
<nav class="level">
<div class="level-left">
<div class="level-item">
<h3 class="title is-3">{ tr!("New Tab") }</h3>
</div>
</div>
</nav>
<div class="card">
<Form<FormFields>
form_link = self.form_field_link.clone()
onvalidateupdate=onvalidateupdate
>
<TextInput<FormFields>
label = tab_name_label.clone()
field_key = FormFields::Name
form_link = self.form_field_link.clone()
placeholder = tab_name_label
validator = name_validator
onupdate = onupdate_name
/>
<SelectField<CommodityType, FormFields>
label = tr!("Working Currency")
field_key = FormFields::WorkingCurrency
form_link = self.form_field_link.clone()
options = self.currencies.clone()
validator = working_currency_validator
onupdate = onupdate_working_currency
selected = self.form_data.working_currency.clone()
/>
</Form<FormFields>>
<div class="field is-grouped">
<div class="control">
<button
class="button is-link"
onclick=onclick_submit
disabled=!self.form_is_valid>
{ tr!("Submit") }
</button>
</div>
<div class="control">
<button
class="button is-link is-light"
onclick=onclick_cancel>
{ tr!("Cancel") }
</button>
</div>
</div>
</div>
</>
}
}
fn rendered(&mut self, _first_render: bool) {}
}
| 34.472222 | 138 | 0.519843 |
e9212b5e37d9de8b609a00ea08c72a80faffa85a | 17,915 | //! Create interactive, native cross-platform applications.
mod state;
pub use state::State;
use crate::clipboard::{self, Clipboard};
use crate::conversion;
use crate::mouse;
use crate::{
Color, Command, Debug, Error, Executor, Mode, Proxy, Runtime, Settings,
Size, Subscription,
};
use iced_futures::futures;
use iced_futures::futures::channel::mpsc;
use iced_graphics::window;
use iced_native::program::Program;
use iced_native::user_interface::{self, UserInterface};
use std::mem::ManuallyDrop;
/// An interactive, native cross-platform application.
///
/// This trait is the main entrypoint of Iced. Once implemented, you can run
/// your GUI application by simply calling [`run`]. It will run in
/// its own window.
///
/// An [`Application`] can execute asynchronous actions by returning a
/// [`Command`] in some of its methods.
///
/// When using an [`Application`] with the `debug` feature enabled, a debug view
/// can be toggled by pressing `F12`.
pub trait Application: Program {
/// The data needed to initialize your [`Application`].
type Flags;
/// Initializes the [`Application`] with the flags provided to
/// [`run`] as part of the [`Settings`].
///
/// Here is where you should return the initial state of your app.
///
/// Additionally, you can return a [`Command`] if you need to perform some
/// async action in the background on startup. This is useful if you want to
/// load state from a file, perform an initial HTTP request, etc.
fn new(flags: Self::Flags) -> (Self, Command<Self::Message>);
/// Returns the current title of the [`Application`].
///
/// This title can be dynamic! The runtime will automatically update the
/// title of your application when necessary.
fn title(&self) -> String;
/// Returns the event `Subscription` for the current state of the
/// application.
///
/// The messages produced by the `Subscription` will be handled by
/// [`update`](#tymethod.update).
///
/// A `Subscription` will be kept alive as long as you keep returning it!
///
/// By default, it returns an empty subscription.
fn subscription(&self) -> Subscription<Self::Message> {
Subscription::none()
}
/// Returns the current [`Application`] mode.
///
/// The runtime will automatically transition your application if a new mode
/// is returned.
///
/// By default, an application will run in windowed mode.
fn mode(&self) -> Mode {
Mode::Windowed
}
/// Returns the background [`Color`] of the [`Application`].
///
/// By default, it returns [`Color::WHITE`].
fn background_color(&self) -> Color {
Color::WHITE
}
/// Returns the scale factor of the [`Application`].
///
/// It can be used to dynamically control the size of the UI at runtime
/// (i.e. zooming).
///
/// For instance, a scale factor of `2.0` will make widgets twice as big,
/// while a scale factor of `0.5` will shrink them to half their size.
///
/// By default, it returns `1.0`.
fn scale_factor(&self) -> f64 {
1.0
}
/// Returns whether the [`Application`] should be terminated.
///
/// By default, it returns `false`.
fn should_exit(&self) -> bool {
false
}
}
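// A minimal implementor sketch (illustrative; `Counter` is an assumption and
// the `Program` supertrait's `update`/`view` methods are elided for brevity):
//
// struct Counter;
//
// impl Application for Counter {
//     type Flags = ();
//
//     fn new(_flags: ()) -> (Self, Command<Self::Message>) {
//         (Counter, Command::none())
//     }
//
//     fn title(&self) -> String {
//         String::from("Counter")
//     }
// }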
/// Runs an [`Application`] with an executor, compositor, and the provided
/// settings.
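///
/// A call sketch (illustrative; `MyApp`, `MyExecutor` and `MyCompositor` are
/// placeholder types, not part of this crate):
///
/// ```ignore
/// run::<MyApp, MyExecutor, MyCompositor>(settings, compositor_settings)?;
/// ```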
pub fn run<A, E, C>(
settings: Settings<A::Flags>,
compositor_settings: C::Settings,
) -> Result<(), Error>
where
A: Application + 'static,
E: Executor + 'static,
C: window::Compositor<Renderer = A::Renderer> + 'static,
{
use futures::task;
use futures::Future;
use winit::event_loop::EventLoop;
use winit::platform::run_return::EventLoopExtRunReturn;
let mut debug = Debug::new();
debug.startup_started();
let mut event_loop = EventLoop::with_user_event();
let mut proxy = event_loop.create_proxy();
let mut runtime = {
let proxy = Proxy::new(event_loop.create_proxy());
let executor = E::new().map_err(Error::ExecutorCreationFailed)?;
Runtime::new(executor, proxy)
};
let (application, init_command) = {
let flags = settings.flags;
runtime.enter(|| A::new(flags))
};
let subscription = application.subscription();
let window = settings
.window
.into_builder(
&application.title(),
application.mode(),
event_loop.primary_monitor(),
settings.id,
)
.build(&event_loop)
.map_err(Error::WindowCreationFailed)?;
let mut clipboard = Clipboard::connect(&window);
run_command(
init_command,
&mut runtime,
&mut clipboard,
&mut proxy,
&window,
);
runtime.track(subscription);
let (compositor, renderer) = C::new(compositor_settings, Some(&window))?;
let (mut sender, receiver) = mpsc::unbounded();
let mut instance = Box::pin(run_instance::<A, E, C>(
application,
compositor,
renderer,
runtime,
clipboard,
proxy,
debug,
receiver,
window,
settings.exit_on_close_request,
));
let mut context = task::Context::from_waker(task::noop_waker_ref());
event_loop.run_return(move |event, _, control_flow| {
use winit::event_loop::ControlFlow;
if let ControlFlow::Exit = control_flow {
return;
}
let event = match event {
winit::event::Event::WindowEvent {
event:
winit::event::WindowEvent::ScaleFactorChanged {
new_inner_size,
..
},
window_id,
} => Some(winit::event::Event::WindowEvent {
event: winit::event::WindowEvent::Resized(*new_inner_size),
window_id,
}),
_ => event.to_static(),
};
if let Some(event) = event {
sender.start_send(event).expect("Send event");
let poll = instance.as_mut().poll(&mut context);
*control_flow = match poll {
task::Poll::Pending => ControlFlow::Wait,
task::Poll::Ready(_) => ControlFlow::Exit,
};
}
});
Ok(())
}
async fn run_instance<A, E, C>(
mut application: A,
mut compositor: C,
mut renderer: A::Renderer,
mut runtime: Runtime<E, Proxy<A::Message>, A::Message>,
mut clipboard: Clipboard,
mut proxy: winit::event_loop::EventLoopProxy<A::Message>,
mut debug: Debug,
mut receiver: mpsc::UnboundedReceiver<winit::event::Event<'_, A::Message>>,
window: winit::window::Window,
exit_on_close_request: bool,
) where
A: Application + 'static,
E: Executor + 'static,
C: window::Compositor<Renderer = A::Renderer> + 'static,
{
use iced_futures::futures::stream::StreamExt;
use winit::event;
let mut surface = compositor.create_surface(&window);
let mut state = State::new(&application, &window);
let mut viewport_version = state.viewport_version();
let physical_size = state.physical_size();
compositor.configure_surface(
&mut surface,
physical_size.width,
physical_size.height,
);
let mut user_interface = ManuallyDrop::new(build_user_interface(
&mut application,
user_interface::Cache::default(),
&mut renderer,
state.logical_size(),
&mut debug,
));
let mut mouse_interaction = mouse::Interaction::default();
let mut events = Vec::new();
let mut messages = Vec::new();
debug.startup_finished();
while let Some(event) = receiver.next().await {
match event {
event::Event::MainEventsCleared => {
if events.is_empty() && messages.is_empty() {
continue;
}
debug.event_processing_started();
let (interface_state, statuses) = user_interface.update(
&events,
state.cursor_position(),
&mut renderer,
&mut clipboard,
&mut messages,
);
debug.event_processing_finished();
for event in events.drain(..).zip(statuses.into_iter()) {
runtime.broadcast(event);
}
if !messages.is_empty()
|| matches!(
interface_state,
user_interface::State::Outdated,
)
{
let cache =
ManuallyDrop::into_inner(user_interface).into_cache();
// Update application
update(
&mut application,
&mut runtime,
&mut clipboard,
&mut proxy,
&mut debug,
&mut messages,
&window,
);
// Update window
state.synchronize(&application, &window);
let should_exit = application.should_exit();
user_interface = ManuallyDrop::new(build_user_interface(
&mut application,
cache,
&mut renderer,
state.logical_size(),
&mut debug,
));
if should_exit {
break;
}
}
debug.draw_started();
let new_mouse_interaction =
user_interface.draw(&mut renderer, state.cursor_position());
debug.draw_finished();
if new_mouse_interaction != mouse_interaction {
window.set_cursor_icon(conversion::mouse_interaction(
new_mouse_interaction,
));
mouse_interaction = new_mouse_interaction;
}
window.request_redraw();
}
event::Event::PlatformSpecific(event::PlatformSpecific::MacOS(
event::MacOS::ReceivedUrl(url),
)) => {
use iced_native::event;
events.push(iced_native::Event::PlatformSpecific(
event::PlatformSpecific::MacOS(event::MacOS::ReceivedUrl(
url,
)),
));
}
event::Event::UserEvent(message) => {
messages.push(message);
}
event::Event::RedrawRequested(_) => {
let physical_size = state.physical_size();
if physical_size.width == 0 || physical_size.height == 0 {
continue;
}
debug.render_started();
let current_viewport_version = state.viewport_version();
if viewport_version != current_viewport_version {
let logical_size = state.logical_size();
debug.layout_started();
user_interface = ManuallyDrop::new(
ManuallyDrop::into_inner(user_interface)
.relayout(logical_size, &mut renderer),
);
debug.layout_finished();
debug.draw_started();
let new_mouse_interaction = user_interface
.draw(&mut renderer, state.cursor_position());
if new_mouse_interaction != mouse_interaction {
window.set_cursor_icon(conversion::mouse_interaction(
new_mouse_interaction,
));
mouse_interaction = new_mouse_interaction;
}
debug.draw_finished();
compositor.configure_surface(
&mut surface,
physical_size.width,
physical_size.height,
);
viewport_version = current_viewport_version;
}
match compositor.present(
&mut renderer,
&mut surface,
state.viewport(),
state.background_color(),
&debug.overlay(),
) {
Ok(()) => {
debug.render_finished();
// TODO: Handle animations!
// Maybe we can use `ControlFlow::WaitUntil` for this.
}
Err(error) => match error {
// This is an unrecoverable error.
window::SurfaceError::OutOfMemory => {
panic!("{:?}", error);
}
_ => {
debug.render_finished();
// Try rendering again next frame.
window.request_redraw();
}
},
}
}
event::Event::WindowEvent {
event: window_event,
..
} => {
if requests_exit(&window_event, state.modifiers())
&& exit_on_close_request
{
break;
}
state.update(&window, &window_event, &mut debug);
if let Some(event) = conversion::window_event(
&window_event,
state.scale_factor(),
state.modifiers(),
) {
events.push(event);
}
}
_ => {}
}
}
// Manually drop the user interface
drop(ManuallyDrop::into_inner(user_interface));
}
/// Returns true if the provided event should cause an [`Application`] to
/// exit.
pub fn requests_exit(
event: &winit::event::WindowEvent<'_>,
_modifiers: winit::event::ModifiersState,
) -> bool {
use winit::event::WindowEvent;
match event {
WindowEvent::CloseRequested => true,
#[cfg(target_os = "macos")]
WindowEvent::KeyboardInput {
input:
winit::event::KeyboardInput {
virtual_keycode: Some(winit::event::VirtualKeyCode::Q),
state: winit::event::ElementState::Pressed,
..
},
..
} if _modifiers.logo() => true,
_ => false,
}
}
/// Builds a [`UserInterface`] for the provided [`Application`], logging
/// [`struct@Debug`] information accordingly.
pub fn build_user_interface<'a, A: Application>(
application: &'a mut A,
cache: user_interface::Cache,
renderer: &mut A::Renderer,
size: Size,
debug: &mut Debug,
) -> UserInterface<'a, A::Message, A::Renderer> {
debug.view_started();
let view = application.view();
debug.view_finished();
debug.layout_started();
let user_interface = UserInterface::build(view, size, cache, renderer);
debug.layout_finished();
user_interface
}
/// Updates an [`Application`] by feeding it the provided messages, spawning any
/// resulting [`Command`], and tracking its [`Subscription`].
pub fn update<A: Application, E: Executor>(
application: &mut A,
runtime: &mut Runtime<E, Proxy<A::Message>, A::Message>,
clipboard: &mut Clipboard,
proxy: &mut winit::event_loop::EventLoopProxy<A::Message>,
debug: &mut Debug,
messages: &mut Vec<A::Message>,
window: &winit::window::Window,
) {
for message in messages.drain(..) {
debug.log_message(&message);
debug.update_started();
let command = runtime.enter(|| application.update(message));
debug.update_finished();
run_command(command, runtime, clipboard, proxy, window);
}
let subscription = application.subscription();
runtime.track(subscription);
}
/// Runs the actions of a [`Command`].
pub fn run_command<Message: 'static + std::fmt::Debug + Send, E: Executor>(
command: Command<Message>,
runtime: &mut Runtime<E, Proxy<Message>, Message>,
clipboard: &mut Clipboard,
proxy: &mut winit::event_loop::EventLoopProxy<Message>,
window: &winit::window::Window,
) {
use iced_native::command;
use iced_native::window;
for action in command.actions() {
match action {
command::Action::Future(future) => {
runtime.spawn(future);
}
command::Action::Clipboard(action) => match action {
clipboard::Action::Read(tag) => {
let message = tag(clipboard.read());
proxy
.send_event(message)
.expect("Send message to event loop");
}
clipboard::Action::Write(contents) => {
clipboard.write(contents);
}
},
command::Action::Window(action) => match action {
window::Action::Resize { width, height } => {
window.set_inner_size(winit::dpi::LogicalSize {
width,
height,
});
}
window::Action::Move { x, y } => {
window.set_outer_position(winit::dpi::LogicalPosition {
x,
y,
});
}
},
}
}
}
| 31.707965 | 80 | 0.525147 |
2802f007e85d3f1f351988612a4284f37a14a2f4 | 14,436 | use std::time::Duration;
use crate::annotations::*;
use crate::display_params::*;
use crate::draw::DrawingState;
use crate::CliResult;
use ansi_term::ANSIString;
use ansi_term::Color::*;
#[allow(unused_imports)]
use std::convert::TryFrom;
use tract_core::internal::*;
use tract_core::itertools::Itertools;
pub fn render(
model: &dyn Model,
annotations: &Annotations,
options: &DisplayParams,
) -> CliResult<()> {
render_prefixed(model, "", &[], annotations, options)
}
pub fn render_node(
model: &dyn Model,
node_id: usize,
annotations: &Annotations,
options: &DisplayParams,
) -> CliResult<()> {
render_node_prefixed(model, "", &[], node_id, None, annotations, options)
}
fn render_prefixed(
model: &dyn Model,
prefix: &str,
scope: &[(usize, String)],
annotations: &Annotations,
options: &DisplayParams,
) -> CliResult<()> {
let mut drawing_state =
if options.should_draw() { Some(DrawingState::default()) } else { None };
let node_ids = if options.natural_order {
(0..model.nodes_len()).collect()
} else {
model.eval_order()?
};
for node in node_ids {
if options.filter(model, scope, node)? {
render_node_prefixed(
model,
prefix,
scope,
node,
drawing_state.as_mut(),
annotations,
options,
)?
} else if let Some(ref mut ds) = drawing_state {
let _prefix = ds.draw_node_vprefix(model, node, &options)?;
let _body = ds.draw_node_body(model, node, &options)?;
let _suffix = ds.draw_node_vsuffix(model, node, &options)?;
}
}
Ok(())
}
fn render_node_prefixed(
model: &dyn Model,
prefix: &str,
scope: &[(usize, String)],
node_id: usize,
mut drawing_state: Option<&mut DrawingState>,
annotations: &Annotations,
options: &DisplayParams,
) -> CliResult<()> {
let qid = NodeQId(scope.into(), node_id);
let tags = annotations.tags.get(&qid).cloned().unwrap_or_default();
let name_color = tags.style.clone().unwrap_or(White.into());
let node_name = model.node_name(node_id);
let node_op_name = model.node_op(node_id).name();
let cost_column_pad = format!("{:>1$}", "", options.cost as usize * 25);
let profile_column_pad = format!("{:>1$}", "", options.profile as usize * 20);
if let Some(ref mut ds) = &mut drawing_state {
for l in ds.draw_node_vprefix(model, node_id, &options)? {
println!("{}{}{}{} ", cost_column_pad, profile_column_pad, prefix, l);
}
}
// cost column
let mut cost_column = if options.cost {
Some(
tags.cost
.iter()
.map(|c| {
let key = format!("{:?}:", c.0);
let value = render_tdim(&c.1);
let value_visible_len = c.1.to_string().len();
let padding = 25usize.saturating_sub(value_visible_len + key.len());
key + &value + &*std::iter::repeat(' ').take(padding).join("")
})
.peekable(),
)
} else {
None
};
// profile column
let mut profile_column = tags.profile.map(|measure| {
let profile_summary = annotations.profile_summary.as_ref().unwrap();
let ratio = measure.as_secs_f64() / profile_summary.sum.as_secs_f64();
let ratio_for_color = measure.as_secs_f64() / profile_summary.max.as_secs_f64();
let color = colorous::RED_YELLOW_GREEN.eval_continuous(1.0 - ratio_for_color);
let color = ansi_term::Color::RGB(color.r, color.g, color.b);
let label = format!(
"{:7.3} ms/i {} ",
measure.as_secs_f64() * 1e3,
color.bold().paint(format!("{:>4.1}%", ratio * 100.0))
);
std::iter::once(label)
});
// drawing column
let mut drawing_lines: Box<dyn Iterator<Item = String>> =
if let Some(ds) = drawing_state.as_mut() {
let body = ds.draw_node_body(model, node_id, options)?;
let suffix = ds.draw_node_vsuffix(model, node_id, options)?;
let filler = ds.draw_node_vfiller(model, node_id)?;
Box::new(body.into_iter().chain(suffix.into_iter()).chain(std::iter::repeat(filler)))
} else {
Box::new(std::iter::repeat(cost_column_pad.clone()))
};
macro_rules! prefix {
() => {
let cost = cost_column
.as_mut()
.map(|it| it.next().unwrap_or_else(|| cost_column_pad.to_string()))
.unwrap_or("".to_string());
let profile = profile_column
.as_mut()
.map(|it| it.next().unwrap_or_else(|| profile_column_pad.to_string()))
.unwrap_or("".to_string());
print!("{}{}{}{} ", cost, profile, prefix, drawing_lines.next().unwrap(),)
};
};
prefix!();
println!(
"{} {} {}",
White.bold().paint(format!("{}", node_id)),
        (if node_op_name == "UnimplementedOp" {
Red.bold()
} else {
if options.expect_canonic && !model.node_op(node_id).is_canonic() {
Yellow.bold()
} else {
Blue.bold()
}
})
.paint(node_op_name),
name_color.italic().paint(node_name)
);
for label in tags.labels.iter() {
prefix!();
println!(" * {}", label);
}
match options.io {
Io::Long => {
for (ix, i) in model.node_inputs(node_id).iter().enumerate() {
let star = if ix == 0 { '*' } else { ' ' };
prefix!();
println!(
" {} input fact #{}: {} {}",
star,
ix,
White.bold().paint(format!("{:?}", i)),
model.outlet_fact_format(*i),
);
}
for ix in 0..model.node_output_count(node_id) {
let star = if ix == 0 { '*' } else { ' ' };
let io = if let Some(id) =
model.input_outlets().iter().position(|n| n.node == node_id && n.slot == ix)
{
format!(
"{} {}",
Cyan.bold().paint(format!("MODEL INPUT #{}", id)).to_string(),
tags.model_input.as_ref().map(|s| &**s).unwrap_or("")
)
} else if let Some(id) =
model.output_outlets().iter().position(|n| n.node == node_id && n.slot == ix)
{
format!(
"{} {}",
Yellow.bold().paint(format!("MODEL OUTPUT #{}", id)).to_string(),
tags.model_output.as_ref().map(|s| &**s).unwrap_or("")
)
} else {
"".to_string()
};
let outlet = OutletId::new(node_id, ix);
let successors = model.outlet_successors(outlet);
prefix!();
println!(
" {} output fact #{}: {} {} {}",
star,
ix,
model.outlet_fact_format(outlet),
White.bold().paint(successors.iter().map(|s| format!("{:?}", s)).join(" ")),
io
);
if options.outlet_labels {
if let Some(label) = model.outlet_label(OutletId::new(node_id, ix)) {
prefix!();
println!(" {} ", White.italic().paint(label));
}
}
}
}
Io::Short => {
let same = model.node_inputs(node_id).len() > 0
&& model.node_output_count(node_id) == 1
&& model.outlet_fact_format(node_id.into())
== model.outlet_fact_format(model.node_inputs(node_id)[0]);
if !same {
let style = drawing_state
.and_then(|w| w.wires.last())
.and_then(|w| w.color)
.unwrap_or(White.into());
for ix in 0..model.node_output_count(node_id) {
prefix!();
println!(
" {}{}{} {}",
style.paint(box_drawing::heavy::HORIZONTAL),
style.paint(box_drawing::heavy::HORIZONTAL),
style.paint(box_drawing::heavy::HORIZONTAL),
model.outlet_fact_format((node_id, ix).into())
);
}
}
}
Io::None => (),
}
if options.info {
for info in model.node_op(node_id).info()? {
prefix!();
println!(" * {}", info);
}
}
if options.invariants {
if let Some(typed) = model.downcast_ref::<TypedModel>() {
let node = typed.node(node_id);
prefix!();
println!(" * {:?}", node.op().as_typed().unwrap().invariants(&typed, &node)?);
}
}
if options.debug_op {
prefix!();
println!(" * {:?}", model.node_op(node_id));
}
for section in tags.sections {
if section.is_empty() {
continue;
}
prefix!();
println!(" * {}", section[0]);
        for s in &section[1..] {
prefix!();
println!(" {}", s);
}
}
for (label, sub, _, _) in model.nested_models(node_id) {
let prefix = drawing_lines.next().unwrap();
let mut scope: TVec<_> = scope.into();
scope.push((node_id, label.to_string()));
render_prefixed(
sub,
&format!("{} [{}] ", prefix, label),
&*scope,
annotations,
options,
)?
}
while cost_column.as_mut().map(|cost| cost.peek().is_some()).unwrap_or(false) {
prefix!();
println!("");
}
Ok(())
}
pub fn render_summaries(
model: &dyn Model,
annotations: &Annotations,
options: &DisplayParams,
) -> CliResult<()> {
let total = annotations.tags.values().sum::<NodeTags>();
if options.cost {
println!("{}", White.bold().paint("Cost summary"));
for (c, i) in &total.cost {
println!(" * {:?}: {}", c, render_tdim(i));
}
}
if options.profile {
let summary = annotations.profile_summary.as_ref().unwrap();
println!("{}", White.bold().paint("Most time consuming operations"));
for (op, (dur, n)) in annotations
.tags
.iter()
.map(|(k, v)| {
(
k.model(model).unwrap().node_op(k.1).name(),
v.profile.unwrap_or(Duration::default()),
)
})
.sorted_by_key(|a| a.0.to_string())
.group_by(|(n, _)| n.clone())
.into_iter()
.map(|(a, group)| {
(
a,
group
.into_iter()
.fold((Duration::default(), 0), |acc, d| (acc.0 + d.1, acc.1 + 1)),
)
})
.into_iter()
.sorted_by_key(|(_, d)| d.0)
.rev()
{
println!(
" * {} {:3} nodes: {}",
Blue.bold().paint(format!("{:20}", op)),
n,
dur_avg_ratio(dur, summary.sum)
);
}
println!("{}", White.bold().paint("By prefix"));
fn prefixes_for(s: &str) -> impl Iterator<Item = String> + '_ {
use tract_itertools::*;
let split = s.split(".").count();
(0..split).map(move |n| s.split(".").take(n).join("."))
}
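        // Illustrative: prefixes_for("conv.weights.q") yields "", "conv" and
        // "conv.weights"; the empty prefix is filtered out just below.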
let all_prefixes = annotations
.tags
.keys()
.flat_map(|id| prefixes_for(id.model(model).unwrap().node_name(id.1)))
.filter(|s| s.len() > 0)
.sorted()
.unique()
.collect::<Vec<String>>();
for prefix in &all_prefixes {
let sum = annotations
.tags
.iter()
.filter(|(k, _v)| k.model(model).unwrap().node_name(k.1).starts_with(prefix))
.map(|(_k,v)| v)
.sum::<NodeTags>();
if sum.profile.unwrap_or(Duration::default()).as_secs_f64() / summary.entire.as_secs_f64() < 0.01 {
continue;
}
print!("{} ", dur_avg_ratio(sum.profile.unwrap_or(Duration::default()), summary.sum));
for _ in prefix.chars().filter(|c| *c == '.') {
print!(" ");
}
println!("{}", prefix);
}
println!(
"Not accounted by ops: {}",
dur_avg_ratio(summary.entire - summary.sum.min(summary.entire), summary.entire)
);
println!("Entire network performance: {}", dur_avg(summary.entire));
}
Ok(())
}
/// Format a Duration showing the average time per iteration in ms.
pub fn dur_avg(measure: Duration) -> String {
White.bold().paint(format!("{:.3} ms/i", measure.as_secs_f64() * 1e3)).to_string()
}
/// Format a Duration showing the average time per iteration in ms, with its
/// percentage of a global total.
pub fn dur_avg_ratio(measure: Duration, global: Duration) -> String {
format!(
"{} {}",
White.bold().paint(format!("{:7.3} ms/i", measure.as_secs_f64() * 1e3)),
Yellow
.bold()
.paint(format!("{:>4.1}%", measure.as_secs_f64() / global.as_secs_f64() * 100.)),
)
}
fn render_tdim(d: &TDim) -> ANSIString<'static> {
if let Ok(i) = d.to_integer() {
render_big_integer(i as i64)
} else {
d.to_string().into()
}
}
fn render_big_integer(i: i64) -> ansi_term::ANSIString<'static> {
let raw = i.to_string();
let mut blocks = raw
.chars()
.rev()
.chunks(3)
.into_iter()
.map(|mut c| c.join("").chars().rev().join(""))
.enumerate()
.map(|(ix, s)| if ix % 2 == 1 { White.bold().paint(s).to_string() } else { s })
.collect::<Vec<_>>();
blocks.reverse();
blocks.into_iter().join("").into()
}
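// Illustrative: render_big_integer(1234567) produces "1" + bold("234") + "567",
// bolding every other 3-digit group so long dimension values are easier to scan.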
| 34.453461 | 111 | 0.478734 |
118bff8144c7f6d33d178375eaed8e4d01b0cccb | 5,789 | #![feature(half_open_range_patterns)]
fn main() {}
#[cfg(FALSE)] fn e() { let _ = box #![attr] 0; }
//~^ ERROR an inner attribute is not permitted in this context
#[cfg(FALSE)] fn e() { let _ = [#[attr]]; }
//~^ ERROR expected expression, found `]`
#[cfg(FALSE)] fn e() { let _ = foo#[attr](); }
//~^ ERROR expected one of
#[cfg(FALSE)] fn e() { let _ = foo(#![attr]); }
//~^ ERROR an inner attribute is not permitted in this context
//~| ERROR expected expression, found `)`
#[cfg(FALSE)] fn e() { let _ = x.foo(#![attr]); }
//~^ ERROR an inner attribute is not permitted in this context
//~| ERROR expected expression, found `)`
#[cfg(FALSE)] fn e() { let _ = 0 + #![attr] 0; }
//~^ ERROR an inner attribute is not permitted in this context
#[cfg(FALSE)] fn e() { let _ = !#![attr] 0; }
//~^ ERROR an inner attribute is not permitted in this context
#[cfg(FALSE)] fn e() { let _ = -#![attr] 0; }
//~^ ERROR an inner attribute is not permitted in this context
#[cfg(FALSE)] fn e() { let _ = x #![attr] as Y; }
//~^ ERROR expected one of
#[cfg(FALSE)] fn e() { let _ = || #![attr] foo; }
//~^ ERROR an inner attribute is not permitted in this context
#[cfg(FALSE)] fn e() { let _ = move || #![attr] foo; }
//~^ ERROR an inner attribute is not permitted in this context
#[cfg(FALSE)] fn e() { let _ = || #![attr] {foo}; }
//~^ ERROR an inner attribute is not permitted in this context
#[cfg(FALSE)] fn e() { let _ = move || #![attr] {foo}; }
//~^ ERROR an inner attribute is not permitted in this context
#[cfg(FALSE)] fn e() { let _ = #[attr] ..#[attr] 0; }
//~^ ERROR expected expression, found `..`
#[cfg(FALSE)] fn e() { let _ = #[attr] ..; }
//~^ ERROR expected expression, found `..`
#[cfg(FALSE)] fn e() { let _ = #[attr] &#![attr] 0; }
//~^ ERROR an inner attribute is not permitted in this context
#[cfg(FALSE)] fn e() { let _ = #[attr] &mut #![attr] 0; }
//~^ ERROR an inner attribute is not permitted in this context
#[cfg(FALSE)] fn e() { let _ = #[attr] if 0 {}; }
//~^ ERROR attributes are not yet allowed on `if` expressions
#[cfg(FALSE)] fn e() { let _ = if 0 #[attr] {}; }
//~^ ERROR expected `{`, found `#`
#[cfg(FALSE)] fn e() { let _ = if 0 {#![attr]}; }
//~^ ERROR an inner attribute is not permitted in this context
#[cfg(FALSE)] fn e() { let _ = if 0 {} #[attr] else {}; }
//~^ ERROR expected one of
#[cfg(FALSE)] fn e() { let _ = if 0 {} else #[attr] {}; }
//~^ ERROR expected `{`, found `#`
#[cfg(FALSE)] fn e() { let _ = if 0 {} else {#![attr]}; }
//~^ ERROR an inner attribute is not permitted in this context
#[cfg(FALSE)] fn e() { let _ = if 0 {} else #[attr] if 0 {}; }
//~^ ERROR attributes are not yet allowed on `if` expressions
//~| ERROR expected `{`, found `#`
#[cfg(FALSE)] fn e() { let _ = if 0 {} else if 0 #[attr] {}; }
//~^ ERROR expected `{`, found `#`
#[cfg(FALSE)] fn e() { let _ = if 0 {} else if 0 {#![attr]}; }
//~^ ERROR an inner attribute is not permitted in this context
#[cfg(FALSE)] fn e() { let _ = #[attr] if let _ = 0 {}; }
//~^ ERROR attributes are not yet allowed on `if` expressions
#[cfg(FALSE)] fn e() { let _ = if let _ = 0 #[attr] {}; }
//~^ ERROR expected `{`, found `#`
#[cfg(FALSE)] fn e() { let _ = if let _ = 0 {#![attr]}; }
//~^ ERROR an inner attribute is not permitted in this context
#[cfg(FALSE)] fn e() { let _ = if let _ = 0 {} #[attr] else {}; }
//~^ ERROR expected one of
#[cfg(FALSE)] fn e() { let _ = if let _ = 0 {} else #[attr] {}; }
//~^ ERROR expected `{`, found `#`
#[cfg(FALSE)] fn e() { let _ = if let _ = 0 {} else {#![attr]}; }
//~^ ERROR an inner attribute is not permitted in this context
#[cfg(FALSE)] fn e() { let _ = if let _ = 0 {} else #[attr] if let _ = 0 {}; }
//~^ ERROR attributes are not yet allowed on `if` expressions
//~| ERROR expected `{`, found `#`
#[cfg(FALSE)] fn e() { let _ = if let _ = 0 {} else if let _ = 0 #[attr] {}; }
//~^ ERROR expected `{`, found `#`
#[cfg(FALSE)] fn e() { let _ = if let _ = 0 {} else if let _ = 0 {#![attr]}; }
//~^ ERROR an inner attribute is not permitted in this context
#[cfg(FALSE)] fn s() { #[attr] #![attr] let _ = 0; }
//~^ ERROR an inner attribute is not permitted following an outer attribute
#[cfg(FALSE)] fn s() { #[attr] #![attr] 0; }
//~^ ERROR an inner attribute is not permitted following an outer attribute
#[cfg(FALSE)] fn s() { #[attr] #![attr] foo!(); }
//~^ ERROR an inner attribute is not permitted following an outer attribute
#[cfg(FALSE)] fn s() { #[attr] #![attr] foo![]; }
//~^ ERROR an inner attribute is not permitted following an outer attribute
#[cfg(FALSE)] fn s() { #[attr] #![attr] foo!{}; }
//~^ ERROR an inner attribute is not permitted following an outer attribute
// FIXME: Allow attributes in pattern constexprs?
// note: requires parens in patterns to allow disambiguation
#[cfg(FALSE)] fn e() { match 0 { 0..=#[attr] 10 => () } }
//~^ ERROR inclusive range with no end
//~| ERROR expected one of `=>`, `if`, or `|`, found `#`
#[cfg(FALSE)] fn e() { match 0 { 0..=#[attr] -10 => () } }
//~^ ERROR inclusive range with no end
//~| ERROR expected one of `=>`, `if`, or `|`, found `#`
#[cfg(FALSE)] fn e() { match 0 { 0..=-#[attr] 10 => () } }
//~^ ERROR unexpected token: `#`
#[cfg(FALSE)] fn e() { match 0 { 0..=#[attr] FOO => () } }
//~^ ERROR inclusive range with no end
//~| ERROR expected one of `=>`, `if`, or `|`, found `#`
#[cfg(FALSE)] fn e() { let _ = x.#![attr]foo(); }
//~^ ERROR unexpected token: `#`
//~| ERROR expected one of `.`
#[cfg(FALSE)] fn e() { let _ = x.#[attr]foo(); }
//~^ ERROR unexpected token: `#`
//~| ERROR expected one of `.`
// make sure we don't catch this bug again...
#[cfg(FALSE)] fn e() { { fn foo() { #[attr]; } } }
//~^ ERROR expected statement after outer attribute
#[cfg(FALSE)] fn e() { { fn foo() { #[attr] } } }
//~^ ERROR expected statement after outer attribute
| 49.059322 | 78 | 0.585421 |
8f5db187601336bd8e0ed59761df43f242bd25f5 | 5,216 | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use byteorder::{BigEndian, WriteBytesExt};
use fidl::encoding::Decodable;
use fidl_fuchsia_media::FormatDetails;
use std::{convert::TryFrom, fs, mem, path::Path};
use stream_processor_test::*;
pub const BEAR_TEST_FILE: &str = "/pkg/data/bear.h264";
const NAL_START_CODE: u32 = 1;
/// Represents an H264 elementary stream.
pub struct H264Stream {
data: Vec<u8>,
}
impl H264Stream {
/// Constructs an H264 elementary stream from a file with raw elementary stream data.
pub fn from_file(filename: impl AsRef<Path>) -> Result<Self> {
Ok(H264Stream::from(fs::read(filename)?))
}
/// Returns an iterator over H264 NALs that does not copy.
fn nal_iter(&self) -> impl Iterator<Item = H264Nal<'_>> {
H264NalIter { data: &self.data, pos: 0 }
}
}
impl From<Vec<u8>> for H264Stream {
fn from(data: Vec<u8>) -> Self {
Self { data }
}
}
impl ElementaryStream for H264Stream {
fn format_details(&self, version_ordinal: u64) -> FormatDetails {
FormatDetails {
format_details_version_ordinal: Some(version_ordinal),
mime_type: Some(String::from("video/h264")),
..<FormatDetails as Decodable>::new_empty()
}
}
fn is_access_units(&self) -> bool {
true
}
fn stream<'a>(&'a self) -> Box<dyn Iterator<Item = ElementaryStreamChunk<'_>> + 'a> {
Box::new(self.nal_iter().map(|nal| ElementaryStreamChunk {
start_access_unit: true,
known_end_access_unit: true,
data: nal.data,
significance: match nal.kind {
H264NalKind::Picture => Significance::Video(VideoSignificance::Picture),
H264NalKind::NotPicture => Significance::Video(VideoSignificance::NotPicture),
},
timestamp: None,
}))
}
}
pub struct H264SeiItuT35 {
pub country_code: u8,
pub country_code_extension: u8,
pub payload: Vec<u8>,
}
impl H264SeiItuT35 {
pub const COUNTRY_CODE_UNITED_STATES: u8 = 0xb5;
pub fn as_bytes(&self) -> Result<Vec<u8>> {
const ITU_T35_PAYLOAD_TYPE: u8 = 4;
let mut bytes = vec![];
bytes.write_u32::<BigEndian>(NAL_START_CODE)?;
bytes.write_u8(H264NalKind::SEI_CODE)?;
bytes.write_u8(ITU_T35_PAYLOAD_TYPE)?;
bytes.write_u8(u8::try_from(self.payload_size())?)?;
bytes.write_u8(self.country_code)?;
bytes.write_u8(self.country_code_extension)?;
bytes.append(&mut self.payload.clone());
Ok(bytes)
}
fn payload_size(&self) -> usize {
mem::size_of::<u8>() + mem::size_of::<u8>() + self.payload.len()
}
}
pub struct H264Nal<'a> {
pub kind: H264NalKind,
pub data: &'a [u8],
}
pub enum H264NalKind {
Picture,
NotPicture,
}
impl H264NalKind {
const NON_IDR_PICTURE_CODE: u8 = 1;
const IDR_PICTURE_CODE: u8 = 5;
const SEI_CODE: u8 = 6;
fn from_header(header: u8) -> Self {
let kind = header & 0xf;
if kind == Self::NON_IDR_PICTURE_CODE || kind == Self::IDR_PICTURE_CODE {
H264NalKind::Picture
} else {
H264NalKind::NotPicture
}
}
}
struct H264NalStart<'a> {
/// Position in the h264 stream of the start.
pos: usize,
/// All the data from the start of the NAL onward.
data: &'a [u8],
kind: H264NalKind,
}
/// An iterator over NALs in an H264 stream.
struct H264NalIter<'a> {
data: &'a [u8],
pos: usize,
}
impl<'a> H264NalIter<'a> {
fn next_nal(&self, pos: usize) -> Option<H264Nal<'a>> {
        // This won't need to search if `pos` is already at a start code.
let nal_start = self.next_nal_start(pos)?;
// We search 3 bytes after the found nal's start, because that will
// ensure we don't just find the same start code again.
match self.next_nal_start(nal_start.pos + 3) {
Some(next_start) => Some(H264Nal {
kind: nal_start.kind,
data: &nal_start.data[0..(next_start.pos - nal_start.pos)],
}),
None => Some(H264Nal { kind: nal_start.kind, data: nal_start.data }),
}
}
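    // NAL units are delimited by 3- or 4-byte start codes. Illustrative bytes:
    // `00 00 00 01 67 ...` would start an SPS NAL (kind NotPicture here), while
    // `00 00 01 41 ...` would start a non-IDR slice (kind Picture).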
fn next_nal_start(&self, pos: usize) -> Option<H264NalStart<'a>> {
// This search size will find 3 and 4 byte start codes, and the
// header value.
const NAL_SEARCH_SIZE: usize = 5;
let data = self.data.get(pos..)?;
data.windows(NAL_SEARCH_SIZE).enumerate().find_map(|(i, candidate)| match candidate {
[0, 0, 0, 1, h] | [0, 0, 1, h, _] => Some(H264NalStart {
pos: i + pos,
data: data.get(i..).expect("Getting slice starting where we just matched"),
kind: H264NalKind::from_header(*h),
}),
_ => None,
})
}
}
impl<'a> Iterator for H264NalIter<'a> {
type Item = H264Nal<'a>;
fn next(&mut self) -> Option<Self::Item> {
let nal = self.next_nal(self.pos);
self.pos += nal.as_ref().map(|n| n.data.len()).unwrap_or(0);
nal
}
}
| 30.502924 | 94 | 0.602186 |
1dec1a9e4264f84f71f70c70a6de6cd7481e8c6a | 382 | //! The `getgid` syscall returns the GID of the process's owner.
use crate::errno::Errno;
use crate::process::Process;
use crate::process::Regs;
/// The implementation of the `getgid` syscall.
pub fn getgid(_: &Regs) -> Result<i32, Errno> {
let mutex = Process::get_current().unwrap();
let mut guard = mutex.lock(false);
let proc = guard.get_mut();
Ok(proc.get_gid() as _)
}
| 25.466667 | 64 | 0.688482 |
fe04d140f29f9db3a9e5882c15cc1dc4e54d9da3 | 2,431 | #![windows_subsystem = "windows"]
extern crate native_windows_derive as nwd;
extern crate native_windows_gui as nwg;
use {
nwd::NwgUi,
nwg::NativeUi,
outcrop::{config, injection},
};
#[derive(Default, NwgUi)]
pub struct Outcrop {
#[nwg_control(size: (500, 200), position: (300, 300), title: "OUTCROP", flags: "WINDOW|VISIBLE")]
#[nwg_events( OnWindowClose: [Outcrop::exit_program] )]
window: nwg::Window,
#[nwg_control(text: "Outcrop: A BDS dll mod loader and injector. Made by Luke7720 mainly because \
Windows Defender tries to remove Xenos repeatedly and using Xenos was also confusing to some people. \n\
---------------------------------------------------------------------------------------------\
---------------------------
", size: (470, 70), position: (10, 10))]
label: nwg::Label,
#[nwg_control(text: "Select DLLs to inject", size: (235, 25), position: (10, 80))]
label2: nwg::Label,
#[nwg_control(text: "", size: (235, 25), position: (10, 110))]
dll_path: nwg::TextInput,
#[nwg_control(text: "See DLL list: ", size: (280, 25), position: (250, 80))]
label3: nwg::Label,
#[nwg_control(text: "Available mods", size: (235, 25), position: (250, 110))]
#[nwg_events( OnButtonClick: [Outcrop::list] )]
mod_list: nwg::Button,
#[nwg_control(text: "Inject", size: (480, 30), position: (10, 150))]
#[nwg_events( OnButtonClick: [Outcrop::inject] )]
inject: nwg::Button,
}
impl Outcrop {
fn inject(&self) {
let dll = String::from(&self.dll_path.text());
injection::mod_inject(&dll);
}
fn list(&self) {
let dll_list = injection::dll_map();
let mut dlls: String = String::from(" ");
for (key, dll) in &dll_list {
dlls.push_str(&format!("{}: {}\n", key, dll));
}
nwg::simple_message("Completed", &format!("{}", dlls));
}
fn exit_program(&self) {
nwg::stop_thread_dispatch();
}
}
fn main() {
let cfg_exists = config::cfg_manager();
if cfg_exists {
if !config::read_cfg() {
load_gui();
} else {
injection::inject_all();
}
} else {
load_gui();
}
}
fn load_gui() {
nwg::init().expect("Failed to init Native Windows GUI");
let _app = Outcrop::build_ui(Default::default()).expect("Failed to build UI");
nwg::dispatch_thread_events();
}
| 27.942529 | 108 | 0.564788 |
f714c22240238b1b30b4a3eb40b84c2a9bc015e3 | 1,487 | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::DSTBY_INFO1 {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
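// Illustrative read-modify-write through the API above (the peripheral handle
// name is hypothetical):
//     periph.dstby_info1.modify(|r, w| unsafe { w.bits(r.bits() | 1) });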
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
}
| 22.876923 | 59 | 0.497646 |
feb2c73ccb2900b72c20265bdc4e055a169e8666 | 1,786 | use cargo::ops;
use cargo::util::{CliResult, Config};
#[derive(RustcDecodable)]
pub struct Options {
arg_crate: Option<String>,
flag_token: Option<String>,
flag_vers: Option<String>,
flag_index: Option<String>,
flag_verbose: bool,
flag_quiet: bool,
flag_color: Option<String>,
flag_undo: bool,
}
pub static USAGE: &'static str = "
Remove a pushed crate from the index
Usage:
cargo yank [options] [<crate>]
Options:
-h, --help Print this message
--vers VERSION The version to yank or un-yank
--undo Undo a yank, putting a version back into the index
--index INDEX Registry index to yank from
--token TOKEN API token to use when authenticating
-v, --verbose Use verbose output
-q, --quiet No output printed to stdout
--color WHEN Coloring: auto, always, never
The yank command removes a previously pushed crate's version from the server's
index. This command does not delete any data, and the crate will still be
available for download via the registry's download link.
Note that existing crates locked to a yanked version will still be able to
download the yanked version to use it. Cargo will, however, not allow any new
crates to be locked to any yanked version.
";
pub fn execute(options: Options, config: &Config) -> CliResult<Option<()>> {
try!(config.shell().set_verbosity(options.flag_verbose, options.flag_quiet));
try!(config.shell().set_color_config(options.flag_color.as_ref().map(|s| &s[..])));
try!(ops::yank(config,
options.arg_crate,
options.flag_vers,
options.flag_token,
options.flag_index,
options.flag_undo));
Ok(None)
}
| 33.698113 | 87 | 0.657335 |
e4bb003d0bca9ed739e63de06127b46ed1f95b2d | 37,946 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Arrow IPC File and Stream Readers
//!
//! The `FileReader` and `StreamReader` have similar interfaces,
//! however the `FileReader` expects a reader that supports `Seek`ing
use std::collections::HashMap;
use std::io::{BufReader, Read, Seek, SeekFrom};
use std::sync::Arc;
use crate::array::*;
use crate::buffer::Buffer;
use crate::compute::cast;
use crate::datatypes::{DataType, Field, IntervalUnit, Schema, SchemaRef};
use crate::error::{ArrowError, Result};
use crate::ipc;
use crate::record_batch::{RecordBatch, RecordBatchReader};
use ipc::CONTINUATION_MARKER;
use DataType::*;
/// Read a buffer based on offset and length
fn read_buffer(buf: &ipc::Buffer, a_data: &[u8]) -> Buffer {
let start_offset = buf.offset() as usize;
let end_offset = start_offset + buf.length() as usize;
let buf_data = &a_data[start_offset..end_offset];
Buffer::from(&buf_data)
}
/// Coordinates reading arrays based on data types.
///
/// Notes:
/// * In the IPC format, null buffers are always set, but may be empty. We discard them if an array has 0 nulls
/// * Numeric values inside list arrays are often stored as 64-bit values regardless of their data type size.
/// We thus:
/// - check if the bit width of non-64-bit numbers is 64, and
/// - read the buffer as 64-bit (signed integer or float), and
/// - cast the 64-bit array to the appropriate data type
fn create_array(
nodes: &[ipc::FieldNode],
data_type: &DataType,
data: &[u8],
buffers: &[ipc::Buffer],
dictionaries: &[Option<ArrayRef>],
mut node_index: usize,
mut buffer_index: usize,
) -> (ArrayRef, usize, usize) {
use DataType::*;
let array = match data_type {
Utf8 | Binary | LargeBinary | LargeUtf8 => {
let array = create_primitive_array(
&nodes[node_index],
data_type,
buffers[buffer_index..buffer_index + 3]
.iter()
.map(|buf| read_buffer(buf, data))
.collect(),
);
node_index += 1;
buffer_index += 3;
array
}
FixedSizeBinary(_) => {
let array = create_primitive_array(
&nodes[node_index],
data_type,
buffers[buffer_index..buffer_index + 2]
.iter()
.map(|buf| read_buffer(buf, data))
.collect(),
);
node_index += 1;
buffer_index += 2;
array
}
List(ref list_data_type) | LargeList(ref list_data_type) => {
let list_node = &nodes[node_index];
let list_buffers: Vec<Buffer> = buffers[buffer_index..buffer_index + 2]
.iter()
.map(|buf| read_buffer(buf, data))
.collect();
node_index += 1;
buffer_index += 2;
let triple = create_array(
nodes,
list_data_type,
data,
buffers,
dictionaries,
node_index,
buffer_index,
);
node_index = triple.1;
buffer_index = triple.2;
create_list_array(list_node, data_type, &list_buffers[..], triple.0)
}
FixedSizeList(ref list_data_type, _) => {
let list_node = &nodes[node_index];
let list_buffers: Vec<Buffer> = buffers[buffer_index..=buffer_index]
.iter()
.map(|buf| read_buffer(buf, data))
.collect();
node_index += 1;
buffer_index += 1;
let triple = create_array(
nodes,
list_data_type,
data,
buffers,
dictionaries,
node_index,
buffer_index,
);
node_index = triple.1;
buffer_index = triple.2;
create_list_array(list_node, data_type, &list_buffers[..], triple.0)
}
Struct(struct_fields) => {
let struct_node = &nodes[node_index];
let null_buffer: Buffer = read_buffer(&buffers[buffer_index], data);
node_index += 1;
buffer_index += 1;
// read the arrays for each field
let mut struct_arrays = vec![];
// TODO investigate whether just knowing the number of buffers could
// still work
for struct_field in struct_fields {
let triple = create_array(
nodes,
struct_field.data_type(),
data,
buffers,
dictionaries,
node_index,
buffer_index,
);
node_index = triple.1;
buffer_index = triple.2;
struct_arrays.push((struct_field.clone(), triple.0));
}
let null_count = struct_node.null_count() as usize;
let struct_array = if null_count > 0 {
// create struct array from fields, arrays and null data
StructArray::from((
struct_arrays,
null_buffer,
struct_node.null_count() as usize,
))
} else {
StructArray::from(struct_arrays)
};
Arc::new(struct_array)
}
// Create dictionary array from RecordBatch
Dictionary(_, _) => {
let index_node = &nodes[node_index];
let index_buffers: Vec<Buffer> = buffers[buffer_index..buffer_index + 2]
.iter()
.map(|buf| read_buffer(buf, data))
.collect();
let value_array = dictionaries[node_index].clone().unwrap();
node_index += 1;
buffer_index += 2;
create_dictionary_array(
index_node,
data_type,
&index_buffers[..],
value_array,
)
}
Null => {
let length = nodes[node_index].length() as usize;
let data = ArrayData::builder(data_type.clone())
.len(length)
.offset(0)
.build();
node_index += 1;
// no buffer increases
make_array(data)
}
_ => {
let array = create_primitive_array(
&nodes[node_index],
data_type,
buffers[buffer_index..buffer_index + 2]
.iter()
.map(|buf| read_buffer(buf, data))
.collect(),
);
node_index += 1;
buffer_index += 2;
array
}
};
(array, node_index, buffer_index)
}
/// Reads the correct number of buffers based on data type and null_count, and creates a
/// primitive array ref
fn create_primitive_array(
field_node: &ipc::FieldNode,
data_type: &DataType,
buffers: Vec<Buffer>,
) -> ArrayRef {
let length = field_node.length() as usize;
let null_count = field_node.null_count() as usize;
let array_data = match data_type {
Utf8 | Binary | LargeBinary | LargeUtf8 => {
// read 3 buffers
let mut builder = ArrayData::builder(data_type.clone())
.len(length)
.buffers(buffers[1..3].to_vec())
.offset(0);
if null_count > 0 {
builder = builder
.null_count(null_count)
.null_bit_buffer(buffers[0].clone())
}
builder.build()
}
FixedSizeBinary(_) => {
// read 3 buffers
let mut builder = ArrayData::builder(data_type.clone())
.len(length)
.buffers(buffers[1..2].to_vec())
.offset(0);
if null_count > 0 {
builder = builder
.null_count(null_count)
.null_bit_buffer(buffers[0].clone())
}
builder.build()
}
Int8
| Int16
| Int32
| UInt8
| UInt16
| UInt32
| Time32(_)
| Date32(_)
| Interval(IntervalUnit::YearMonth) => {
if buffers[1].len() / 8 == length && length != 1 {
// interpret as a signed i64, and cast appropriately
let mut builder = ArrayData::builder(DataType::Int64)
.len(length)
.buffers(buffers[1..].to_vec())
.offset(0);
if null_count > 0 {
builder = builder
.null_count(null_count)
.null_bit_buffer(buffers[0].clone())
}
let values = Arc::new(Int64Array::from(builder.build())) as ArrayRef;
// this cast is infallible, the unwrap is safe
let casted = cast(&values, data_type).unwrap();
casted.data()
} else {
let mut builder = ArrayData::builder(data_type.clone())
.len(length)
.buffers(buffers[1..].to_vec())
.offset(0);
if null_count > 0 {
builder = builder
.null_count(null_count)
.null_bit_buffer(buffers[0].clone())
}
builder.build()
}
}
Float32 => {
if buffers[1].len() / 8 == length && length != 1 {
// interpret as a f64, and cast appropriately
let mut builder = ArrayData::builder(DataType::Float64)
.len(length)
.buffers(buffers[1..].to_vec())
.offset(0);
if null_count > 0 {
builder = builder
.null_count(null_count)
.null_bit_buffer(buffers[0].clone())
}
let values = Arc::new(Float64Array::from(builder.build())) as ArrayRef;
// this cast is infallible, the unwrap is safe
let casted = cast(&values, data_type).unwrap();
casted.data()
} else {
let mut builder = ArrayData::builder(data_type.clone())
.len(length)
.buffers(buffers[1..].to_vec())
.offset(0);
if null_count > 0 {
builder = builder
.null_count(null_count)
.null_bit_buffer(buffers[0].clone())
}
builder.build()
}
}
Boolean
| Int64
| UInt64
| Float64
| Time64(_)
| Timestamp(_, _)
| Date64(_)
| Duration(_)
| Interval(IntervalUnit::DayTime) => {
let mut builder = ArrayData::builder(data_type.clone())
.len(length)
.buffers(buffers[1..].to_vec())
.offset(0);
if null_count > 0 {
builder = builder
.null_count(null_count)
.null_bit_buffer(buffers[0].clone())
}
builder.build()
}
t => panic!("Data type {:?} either unsupported or not primitive", t),
};
make_array(array_data)
}
/// Reads the correct number of buffers based on list type and null_count, and creates a
/// list array ref
fn create_list_array(
field_node: &ipc::FieldNode,
data_type: &DataType,
buffers: &[Buffer],
child_array: ArrayRef,
) -> ArrayRef {
if let DataType::List(_) = *data_type {
let null_count = field_node.null_count() as usize;
let mut builder = ArrayData::builder(data_type.clone())
.len(field_node.length() as usize)
.buffers(buffers[1..2].to_vec())
.offset(0)
.child_data(vec![child_array.data()]);
if null_count > 0 {
builder = builder
.null_count(null_count)
.null_bit_buffer(buffers[0].clone())
}
make_array(builder.build())
} else if let DataType::FixedSizeList(_, _) = *data_type {
let null_count = field_node.null_count() as usize;
let mut builder = ArrayData::builder(data_type.clone())
.len(field_node.length() as usize)
.buffers(buffers[1..1].to_vec())
.offset(0)
.child_data(vec![child_array.data()]);
if null_count > 0 {
builder = builder
.null_count(null_count)
.null_bit_buffer(buffers[0].clone())
}
make_array(builder.build())
} else {
panic!("Cannot create list array from {:?}", data_type)
}
}
/// Reads the correct number of buffers based on list type and null_count, and creates a
/// list array ref
fn create_dictionary_array(
field_node: &ipc::FieldNode,
data_type: &DataType,
buffers: &[Buffer],
value_array: ArrayRef,
) -> ArrayRef {
if let DataType::Dictionary(_, _) = *data_type {
let null_count = field_node.null_count() as usize;
let mut builder = ArrayData::builder(data_type.clone())
.len(field_node.length() as usize)
.buffers(buffers[1..2].to_vec())
.offset(0)
.child_data(vec![value_array.data()]);
if null_count > 0 {
builder = builder
.null_count(null_count)
.null_bit_buffer(buffers[0].clone())
}
make_array(builder.build())
} else {
unreachable!("Cannot create dictionary array from {:?}", data_type)
}
}
/// Creates a record batch from binary data using the `ipc::RecordBatch` indexes and the `Schema`
pub fn read_record_batch(
buf: &[u8],
batch: ipc::RecordBatch,
schema: SchemaRef,
dictionaries: &[Option<ArrayRef>],
) -> Result<RecordBatch> {
let buffers = batch.buffers().ok_or_else(|| {
ArrowError::IoError("Unable to get buffers from IPC RecordBatch".to_string())
})?;
let field_nodes = batch.nodes().ok_or_else(|| {
ArrowError::IoError("Unable to get field nodes from IPC RecordBatch".to_string())
})?;
// keep track of buffer and node index, the functions that create arrays mutate these
let mut buffer_index = 0;
let mut node_index = 0;
let mut arrays = vec![];
// keep track of index as lists require more than one node
for field in schema.fields() {
let triple = create_array(
field_nodes,
field.data_type(),
&buf,
buffers,
dictionaries,
node_index,
buffer_index,
);
node_index = triple.1;
buffer_index = triple.2;
arrays.push(triple.0);
}
RecordBatch::try_new(schema, arrays)
}
/// Read the dictionary from the buffer and provided metadata,
/// updating the `dictionaries_by_field` with the resulting dictionary
fn read_dictionary(
buf: &[u8],
batch: ipc::DictionaryBatch,
ipc_schema: &ipc::Schema,
schema: &Schema,
dictionaries_by_field: &mut [Option<ArrayRef>],
) -> Result<()> {
if batch.isDelta() {
return Err(ArrowError::IoError(
"delta dictionary batches not supported".to_string(),
));
}
let id = batch.id();
// As the dictionary batch does not contain the type of the
// values array, we need to retrieve this from the schema.
let first_field = find_dictionary_field(ipc_schema, id).ok_or_else(|| {
ArrowError::InvalidArgumentError("dictionary id not found in schema".to_string())
})?;
// Get an array representing this dictionary's values.
let dictionary_values: ArrayRef = match schema.field(first_field).data_type() {
DataType::Dictionary(_, ref value_type) => {
// Make a fake schema for the dictionary batch.
let schema = Schema {
fields: vec![Field::new("", value_type.as_ref().clone(), false)],
metadata: HashMap::new(),
};
// Read a single column
let record_batch = read_record_batch(
&buf,
batch.data().unwrap(),
Arc::new(schema),
&dictionaries_by_field,
)?;
Some(record_batch.column(0).clone())
}
_ => None,
}
.ok_or_else(|| {
ArrowError::InvalidArgumentError("dictionary id not found in schema".to_string())
})?;
// for all fields with this dictionary id, update the dictionaries vector
// in the reader. Note that a dictionary batch may be shared between many fields.
    // We don't currently record the isOrdered field. This could become a general
    // attribute of arrays.
let fields = ipc_schema.fields().unwrap();
for (i, field) in fields.iter().enumerate() {
if let Some(dictionary) = field.dictionary() {
if dictionary.id() == id {
// Add (possibly multiple) array refs to the dictionaries array.
dictionaries_by_field[i] = Some(dictionary_values.clone());
}
}
}
Ok(())
}
// Linear search for the first dictionary field with a dictionary id.
fn find_dictionary_field(ipc_schema: &ipc::Schema, id: i64) -> Option<usize> {
let fields = ipc_schema.fields().unwrap();
for i in 0..fields.len() {
let field: ipc::Field = fields.get(i);
if let Some(dictionary) = field.dictionary() {
if dictionary.id() == id {
return Some(i);
}
}
}
None
}
/// Arrow File reader
pub struct FileReader<R: Read + Seek> {
/// Buffered file reader that supports reading and seeking
reader: BufReader<R>,
/// The schema that is read from the file header
schema: SchemaRef,
/// The blocks in the file
///
/// A block indicates the regions in the file to read to get data
blocks: Vec<ipc::Block>,
/// A counter to keep track of the current block that should be read
current_block: usize,
/// The total number of blocks, which may contain record batches and other types
total_blocks: usize,
/// Optional dictionaries for each schema field.
///
/// Dictionaries may be appended to in the streaming format.
dictionaries_by_field: Vec<Option<ArrayRef>>,
/// Metadata version
metadata_version: ipc::MetadataVersion,
}
impl<R: Read + Seek> FileReader<R> {
/// Try to create a new file reader
///
/// Returns errors if the file does not meet the Arrow Format header and footer
/// requirements
pub fn try_new(reader: R) -> Result<Self> {
let mut reader = BufReader::new(reader);
// check if header and footer contain correct magic bytes
let mut magic_buffer: [u8; 6] = [0; 6];
reader.read_exact(&mut magic_buffer)?;
if magic_buffer != super::ARROW_MAGIC {
return Err(ArrowError::IoError(
"Arrow file does not contain correct header".to_string(),
));
}
reader.seek(SeekFrom::End(-6))?;
reader.read_exact(&mut magic_buffer)?;
if magic_buffer != super::ARROW_MAGIC {
return Err(ArrowError::IoError(
"Arrow file does not contain correct footer".to_string(),
));
}
// read footer length
let mut footer_size: [u8; 4] = [0; 4];
reader.seek(SeekFrom::End(-10))?;
reader.read_exact(&mut footer_size)?;
let footer_len = i32::from_le_bytes(footer_size);
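        // Illustrative file layout (why the seeks above are relative to the end):
        //   ARROW1 <data blocks> <footer flatbuffer> <i32 footer length> ARROW1
        // i.e. the 4-byte footer length sits 10 bytes from EOF, just before the
        // trailing 6-byte magic.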
// read footer
let mut footer_data = vec![0; footer_len as usize];
reader.seek(SeekFrom::End(-10 - footer_len as i64))?;
reader.read_exact(&mut footer_data)?;
let footer = ipc::get_root_as_footer(&footer_data[..]);
let blocks = footer.recordBatches().ok_or_else(|| {
ArrowError::IoError(
"Unable to get record batches from IPC Footer".to_string(),
)
})?;
let total_blocks = blocks.len();
let ipc_schema = footer.schema().unwrap();
let schema = ipc::convert::fb_to_schema(ipc_schema);
// Create an array of optional dictionary value arrays, one per field.
let mut dictionaries_by_field = vec![None; schema.fields().len()];
for block in footer.dictionaries().unwrap() {
            // the block's metadata length includes the 4-byte length prefix; skip it
// TODO: ARROW-9848: dictionary metadata has not been tested
let meta_len = block.metaDataLength() - 4;
let mut block_data = vec![0; meta_len as usize];
reader.seek(SeekFrom::Start(block.offset() as u64 + 4))?;
reader.read_exact(&mut block_data)?;
let message = ipc::get_root_as_message(&block_data[..]);
match message.header_type() {
ipc::MessageHeader::DictionaryBatch => {
let batch = message.header_as_dictionary_batch().unwrap();
// read the block that makes up the dictionary batch into a buffer
let mut buf = vec![0; block.bodyLength() as usize];
reader.seek(SeekFrom::Start(
block.offset() as u64 + block.metaDataLength() as u64,
))?;
reader.read_exact(&mut buf)?;
read_dictionary(
&buf,
batch,
&ipc_schema,
&schema,
&mut dictionaries_by_field,
)?;
}
_ => {
return Err(ArrowError::IoError(
"Expecting DictionaryBatch in dictionary blocks.".to_string(),
))
}
};
}
Ok(Self {
reader,
schema: Arc::new(schema),
blocks: blocks.to_vec(),
current_block: 0,
total_blocks,
dictionaries_by_field,
metadata_version: footer.version(),
})
}
/// Return the number of batches in the file
pub fn num_batches(&self) -> usize {
self.total_blocks
}
/// Return the schema of the file
pub fn schema(&self) -> SchemaRef {
self.schema.clone()
}
/// Read a specific record batch
///
/// Sets the current block to the index, allowing random reads
pub fn set_index(&mut self, index: usize) -> Result<()> {
if index >= self.total_blocks {
Err(ArrowError::IoError(format!(
"Cannot set batch to index {} from {} total batches",
index, self.total_blocks
)))
} else {
self.current_block = index;
Ok(())
}
}
fn maybe_next(&mut self) -> Result<Option<RecordBatch>> {
let block = self.blocks[self.current_block];
self.current_block += 1;
// read length
self.reader.seek(SeekFrom::Start(block.offset() as u64))?;
let mut meta_buf = [0; 4];
self.reader.read_exact(&mut meta_buf)?;
if meta_buf == CONTINUATION_MARKER {
// continuation marker encountered, read message next
self.reader.read_exact(&mut meta_buf)?;
}
let meta_len = i32::from_le_bytes(meta_buf);
let mut block_data = vec![0; meta_len as usize];
self.reader.read_exact(&mut block_data)?;
let message = ipc::get_root_as_message(&block_data[..]);
// some old test data's footer metadata is not set, so we account for that
if self.metadata_version != ipc::MetadataVersion::V1
&& message.version() != self.metadata_version
{
return Err(ArrowError::IoError(
"Could not read IPC message as metadata versions mismatch".to_string(),
));
}
match message.header_type() {
ipc::MessageHeader::Schema => Err(ArrowError::IoError(
"Not expecting a schema when messages are read".to_string(),
)),
ipc::MessageHeader::RecordBatch => {
let batch = message.header_as_record_batch().ok_or_else(|| {
ArrowError::IoError(
"Unable to read IPC message as record batch".to_string(),
)
})?;
// read the block that makes up the record batch into a buffer
let mut buf = vec![0; block.bodyLength() as usize];
self.reader.seek(SeekFrom::Start(
block.offset() as u64 + block.metaDataLength() as u64,
))?;
self.reader.read_exact(&mut buf)?;
read_record_batch(
&buf,
batch,
self.schema(),
&self.dictionaries_by_field,
).map(Some)
}
ipc::MessageHeader::NONE => {
Ok(None)
}
t => Err(ArrowError::IoError(format!(
"Reading types other than record batches not yet supported, unable to read {:?}", t
))),
}
}
}
impl<R: Read + Seek> Iterator for FileReader<R> {
type Item = Result<RecordBatch>;
fn next(&mut self) -> Option<Self::Item> {
// get current block
if self.current_block < self.total_blocks {
self.maybe_next().transpose()
} else {
None
}
}
}
impl<R: Read + Seek> RecordBatchReader for FileReader<R> {
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
}
/// Arrow Stream reader
pub struct StreamReader<R: Read> {
/// Buffered stream reader
reader: BufReader<R>,
/// The schema that is read from the stream's first message
schema: SchemaRef,
/// The bytes of the IPC schema that is read from the stream's first message
///
/// This is kept in order to interpret dictionary data
ipc_schema: Vec<u8>,
/// Optional dictionaries for each schema field.
///
/// Dictionaries may be appended to in the streaming format.
dictionaries_by_field: Vec<Option<ArrayRef>>,
/// An indicator of whether the stream is complete.
///
/// This value is set to `true` the first time the reader's `next()` returns `None`.
finished: bool,
}
impl<R: Read> StreamReader<R> {
/// Try to create a new stream reader
///
/// The first message in the stream is the schema, the reader will fail if it does not
/// encounter a schema.
    /// To check if the reader is done, use `is_finished`.
pub fn try_new(reader: R) -> Result<Self> {
let mut reader = BufReader::new(reader);
// determine metadata length
let mut meta_size: [u8; 4] = [0; 4];
reader.read_exact(&mut meta_size)?;
let meta_len = {
// If a continuation marker is encountered, skip over it and read
// the size from the next four bytes.
if meta_size == CONTINUATION_MARKER {
reader.read_exact(&mut meta_size)?;
}
i32::from_le_bytes(meta_size)
};
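        // Illustrative encapsulated-message framing read here and in maybe_next():
        //   [0xFFFFFFFF continuation marker] [i32 metadata size] [flatbuffer message] [body]
        // Older streams omit the continuation marker, so both forms are accepted.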
let mut meta_buffer = vec![0; meta_len as usize];
reader.read_exact(&mut meta_buffer)?;
let message = ipc::get_root_as_message(meta_buffer.as_slice());
// message header is a Schema, so read it
let ipc_schema: ipc::Schema = message.header_as_schema().ok_or_else(|| {
ArrowError::IoError("Unable to read IPC message as schema".to_string())
})?;
let schema = ipc::convert::fb_to_schema(ipc_schema);
// Create an array of optional dictionary value arrays, one per field.
let dictionaries_by_field = vec![None; schema.fields().len()];
Ok(Self {
reader,
schema: Arc::new(schema),
ipc_schema: meta_buffer,
finished: false,
dictionaries_by_field,
})
}
/// Return the schema of the stream
pub fn schema(&self) -> SchemaRef {
self.schema.clone()
}
/// Check if the stream is finished
pub fn is_finished(&self) -> bool {
self.finished
}
fn maybe_next(&mut self) -> Result<Option<RecordBatch>> {
if self.finished {
return Ok(None);
}
// determine metadata length
let mut meta_size: [u8; 4] = [0; 4];
match self.reader.read_exact(&mut meta_size) {
Ok(()) => (),
Err(e) => {
return if e.kind() == std::io::ErrorKind::UnexpectedEof {
// Handle EOF without the "0xFFFFFFFF 0x00000000"
// valid according to:
// https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format
self.finished = true;
Ok(None)
} else {
Err(ArrowError::from(e))
};
}
}
let meta_len = {
// If a continuation marker is encountered, skip over it and read
// the size from the next four bytes.
if meta_size == CONTINUATION_MARKER {
self.reader.read_exact(&mut meta_size)?;
}
i32::from_le_bytes(meta_size)
};
if meta_len == 0 {
// the stream has ended, mark the reader as finished
self.finished = true;
return Ok(None);
}
let mut meta_buffer = vec![0; meta_len as usize];
self.reader.read_exact(&mut meta_buffer)?;
        let message = ipc::get_root_as_message(meta_buffer.as_slice());
match message.header_type() {
ipc::MessageHeader::Schema => Err(ArrowError::IoError(
"Not expecting a schema when messages are read".to_string(),
)),
ipc::MessageHeader::RecordBatch => {
let batch = message.header_as_record_batch().ok_or_else(|| {
ArrowError::IoError(
"Unable to read IPC message as record batch".to_string(),
)
})?;
// read the block that makes up the record batch into a buffer
let mut buf = vec![0; message.bodyLength() as usize];
self.reader.read_exact(&mut buf)?;
read_record_batch(&buf, batch, self.schema(), &self.dictionaries_by_field).map(Some)
}
ipc::MessageHeader::DictionaryBatch => {
let batch = message.header_as_dictionary_batch().ok_or_else(|| {
ArrowError::IoError(
"Unable to read IPC message as dictionary batch".to_string(),
)
})?;
// read the block that makes up the dictionary batch into a buffer
let mut buf = vec![0; message.bodyLength() as usize];
self.reader.read_exact(&mut buf)?;
let ipc_schema = ipc::get_root_as_message(&self.ipc_schema).header_as_schema()
.ok_or_else(|| {
ArrowError::IoError(
"Unable to read schema from stored message header".to_string(),
)
})?;
read_dictionary(
&buf, batch, &ipc_schema, &self.schema, &mut self.dictionaries_by_field
)?;
// read the next message until we encounter a RecordBatch
self.maybe_next()
}
ipc::MessageHeader::NONE => {
Ok(None)
}
t => Err(ArrowError::IoError(
format!("Reading types other than record batches not yet supported, unable to read {:?} ", t)
)),
}
}
}
impl<R: Read> Iterator for StreamReader<R> {
type Item = Result<RecordBatch>;
fn next(&mut self) -> Option<Self::Item> {
self.maybe_next().transpose()
}
}
impl<R: Read> RecordBatchReader for StreamReader<R> {
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
}
#[cfg(test)]
mod tests {
use super::*;
use flate2::read::GzDecoder;
use crate::util::integration_util::*;
use std::env;
use std::fs::File;
#[test]
fn read_generated_files() {
let testdata = env::var("ARROW_TEST_DATA").expect("ARROW_TEST_DATA not defined");
// the test is repetitive, thus we can read all supported files at once
let paths = vec![
"generated_interval",
"generated_datetime",
"generated_dictionary",
"generated_nested",
"generated_primitive_no_batches",
"generated_primitive_zerolength",
"generated_primitive",
];
paths.iter().for_each(|path| {
let file = File::open(format!(
"{}/arrow-ipc-stream/integration/0.14.1/{}.arrow_file",
testdata, path
))
.unwrap();
let mut reader = FileReader::try_new(file).unwrap();
// read expected JSON output
let arrow_json = read_gzip_json(path);
assert!(arrow_json.equals_reader(&mut reader));
});
}
#[test]
fn read_generated_streams() {
let testdata = env::var("ARROW_TEST_DATA").expect("ARROW_TEST_DATA not defined");
// the test is repetitive, thus we can read all supported files at once
let paths = vec![
"generated_interval",
"generated_datetime",
"generated_dictionary",
"generated_nested",
"generated_primitive_no_batches",
"generated_primitive_zerolength",
"generated_primitive",
];
paths.iter().for_each(|path| {
let file = File::open(format!(
"{}/arrow-ipc-stream/integration/0.14.1/{}.stream",
testdata, path
))
.unwrap();
let mut reader = StreamReader::try_new(file).unwrap();
// read expected JSON output
let arrow_json = read_gzip_json(path);
assert!(arrow_json.equals_reader(&mut reader));
// the next batch must be empty
assert!(reader.next().is_none());
// the stream must indicate that it's finished
assert!(reader.is_finished());
});
}
#[test]
fn test_arrow_single_float_row() {
let schema = Schema::new(vec![
Field::new("a", DataType::Float32, false),
Field::new("b", DataType::Float32, false),
Field::new("c", DataType::Int32, false),
Field::new("d", DataType::Int32, false),
]);
let arrays = vec![
Arc::new(Float32Array::from(vec![1.23])) as ArrayRef,
Arc::new(Float32Array::from(vec![-6.50])) as ArrayRef,
Arc::new(Int32Array::from(vec![2])) as ArrayRef,
Arc::new(Int32Array::from(vec![1])) as ArrayRef,
];
let batch = RecordBatch::try_new(Arc::new(schema.clone()), arrays).unwrap();
// create stream writer
let file = File::create("target/debug/testdata/float.stream").unwrap();
let mut stream_writer =
crate::ipc::writer::StreamWriter::try_new(file, &schema).unwrap();
stream_writer.write(&batch).unwrap();
stream_writer.finish().unwrap();
// read stream back
let file = File::open("target/debug/testdata/float.stream").unwrap();
let reader = StreamReader::try_new(file).unwrap();
reader.for_each(|batch| {
let batch = batch.unwrap();
assert!(
batch
.column(0)
.as_any()
.downcast_ref::<Float32Array>()
.unwrap()
.value(0)
!= 0.0
);
assert!(
batch
.column(1)
.as_any()
.downcast_ref::<Float32Array>()
.unwrap()
.value(0)
!= 0.0
);
})
}
/// Read gzipped JSON file
fn read_gzip_json(path: &str) -> ArrowJson {
let testdata = env::var("ARROW_TEST_DATA").expect("ARROW_TEST_DATA not defined");
let file = File::open(format!(
"{}/arrow-ipc-stream/integration/0.14.1/{}.json.gz",
testdata, path
))
.unwrap();
let mut gz = GzDecoder::new(&file);
let mut s = String::new();
gz.read_to_string(&mut s).unwrap();
// convert to Arrow JSON
let arrow_json: ArrowJson = serde_json::from_str(&s).unwrap();
arrow_json
}
}
| 35.364399 | 111 | 0.537132 |
5642028612f69825b68326a8ae6c9366da5e648b | 5,085 | /// Using modules and visibility modifiers we have now fully encapsulated the fields of our Ticket.
/// There is no way to create a Ticket instance skipping our validation.
/// At the same time though, we have made it impossible to access the fields of our struct,
/// because they are private!
///
/// Let's fix that by introducing a bunch of accessor methods providing **read-only** access
/// to the fields in a ticket.
/// Let's import the Status enum we defined in the previous exercise; we won't have to modify it.
use super::visibility::ticket::Status;
/// Re-defining Ticket here because methods who need to access private fields
/// have to be defined in the same module of the struct itself, as we saw in the previous
/// exercise.
pub struct Ticket {
title: String,
description: String,
status: Status
}
/// Methods on a struct are defined in `impl` blocks.
impl Ticket {
/// The syntax looks very similar to the syntax to define functions.
/// There is only one peculiarity: if you want to access the struct in a method,
/// you need to take `self` as your first parameter in the method signature.
///
/// You have three options, depending on what you are trying to accomplish:
/// - self
/// - &self
/// - &mut self
///
/// We are now touching for the first time the topic of ownership, enforced by
/// the compiler via the (in)famous borrow-checker.
///
/// In Rust, each value has an owner, statically determined at compile-time.
/// There is only one owner for each value at any given time.
/// Tracking ownership at compile-time is what makes it possible for Rust not to have
/// garbage collection without requiring the developer to manage memory explicitly
/// (most of the time).
///
/// What can an owner do with a value `a`?
/// It can mutate it.
/// It can move ownership to another function or variable.
/// It can lend many immutable references (`&a`) to that value to other functions or variables.
/// It can lend a **single** mutable reference (`&mut a`) to that value to another
/// function or variable.
///
/// What can you do with a shared immutable reference (`&a`) to a value?
/// You can read the value and create more immutable references.
///
/// What can you do with a single mutable reference (`&mut a`) to a value?
/// You can mutate the underlying value.
///
/// Ownership is embedded in the type system: each function has to declare in its signature
/// what kind of ownership level it requires for all its arguments.
/// If the caller cannot fulfill those requirements, they cannot call the function.
///
/// In our case, we only need to read a field of our Ticket struct: it will be enough to ask
/// for an immutable reference to our struct.
///
/// If this sounds a bit complicated/vague, hold on: it will get clearer as you
/// move through the exercises and work your way through a bunch of compiler errors:
/// the compiler is the best pair programming buddy to get familiar with ownership
/// and its rules.
/// To read more on ownership check:
/// https://doc.rust-lang.org/book/ch04-00-understanding-ownership.html
pub fn title(&self) -> &String {
        // We are returning an immutable reference (&) to our title field.
        // This will allow us to access this field without being able to mutate it:
        // encapsulation is guaranteed and we can rest assured that our invariants
        // cannot be violated.
&self.title
}
/// Replace __ with the proper types to get accessor methods for the other two fields.
/// If you are asking yourself why we are returning &str instead of &String, check out:
/// https://blog.thoughtram.io/string-vs-str-in-rust/
pub fn description(__) -> __ {
todo!()
}
pub fn status(__) -> __ {
todo!()
}
}
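// A minimal sketch of the borrowing rules described above (hypothetical code,
// not part of the exercise itself):
//
//     let ticket = create_ticket("A title".into(), "A description".into(), Status::ToDo);
//     let r1 = &ticket;            // many shared references may coexist...
//     let r2 = &ticket;            // ...at the same time
//     println!("{} {}", r1.title(), r2.title());
//     // let m = &mut ticket;      // rejected: `ticket` is not declared `mut`,
//     //                           // and a `&mut` cannot coexist with `r1`/`r2`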
pub fn create_ticket(title: String, description: String, status: Status) -> Ticket {
if title.is_empty() {
panic!("Title cannot be empty!");
}
if title.len() > 50 {
panic!("A title cannot be longer than 50 characters!");
}
if description.len() > 3000 {
panic!("A description cannot be longer than 3000 characters!");
}
Ticket {
title,
description,
status,
}
}
#[cfg(test)]
mod tests {
use super::{create_ticket, Ticket};
use super::super::visibility::ticket::Status;
    #[test]
    fn verify_without_tampering() {
let ticket: Ticket = create_ticket("A title".into(), "A description".into(), Status::ToDo);
        // Instead of accessing the field `ticket.description` we are calling the accessor
        // method, `ticket.description()`, which returns us a reference to the field value
        // and allows us to verify its value without having the chance to modify it.
assert_eq!(ticket.description(), "A description");
assert_eq!(ticket.title(), "A title");
}
}
| 42.375 | 99 | 0.655261 |
91b79c6334cf8774d686dc0f668e0ab92c701f25 | 6,889 | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::{format_err, Error},
fidl::endpoints::DiscoverableService,
fidl_fuchsia_inspect::TreeMarker,
fidl_fuchsia_inspect_deprecated::InspectMarker,
files_async,
fuchsia_zircon::DurationNum,
io_util,
lazy_static::lazy_static,
std::{convert::TryFrom, path::PathBuf, str::FromStr},
};
lazy_static! {
static ref EXPECTED_FILES: Vec<(String, InspectType)> = vec![
(<TreeMarker as fidl::endpoints::ServiceMarker>::DEBUG_NAME.to_string(), InspectType::Tree),
(
<InspectMarker as fidl::endpoints::ServiceMarker>::DEBUG_NAME.to_string(),
InspectType::DeprecatedFidl,
),
(".inspect".to_string(), InspectType::Vmo),
];
static ref READDIR_TIMEOUT_SECONDS: i64 = 15;
}
/// Gets an iterator over all inspect files in a directory.
pub async fn all_locations(root: &str) -> Result<Vec<InspectLocation>, Error> {
let mut path = std::env::current_dir()?;
path.push(root);
let dir_proxy = io_util::open_directory_in_namespace(
&path.to_string_lossy().to_string(),
io_util::OPEN_RIGHT_READABLE,
)?;
let result =
files_async::readdir_recursive(&dir_proxy, Some(READDIR_TIMEOUT_SECONDS.seconds())).await?;
for error in result.errors {
eprintln!("Error: {}", error)
}
let locations = result
.entries
.into_iter()
.filter_map(|entry| {
let mut path = PathBuf::from(&root);
path.push(&entry.name);
EXPECTED_FILES.iter().find(|(filename, _)| entry.name.ends_with(filename)).map(
|(_, inspect_type)| InspectLocation {
inspect_type: inspect_type.clone(),
path,
parts: vec![],
},
)
})
.collect::<Vec<InspectLocation>>();
Ok(locations)
}
/// Type of the inspect file.
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub enum InspectType {
Vmo,
DeprecatedFidl,
Tree,
}
/// InspectLocation of an inspect file.
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub struct InspectLocation {
/// The type of the inspect location.
pub inspect_type: InspectType,
/// The path to the inspect location.
pub path: PathBuf,
/// The parts of the inspect object this location cares about.
/// If empty, it means all.
pub parts: Vec<String>,
}
impl InspectLocation {
pub fn absolute_path(&self) -> Result<String, Error> {
// Note: self.path.canonicalize() returns error for files such as:
// /hub/r/test/*/c/iquery_example_component.cmx/*/out/diagnostics/root.inspect
// Hence, getting the absolute path manually.
let current_dir = std::env::current_dir()?.to_string_lossy().to_string();
let path_string =
self.path.canonicalize().unwrap_or(self.path.clone()).to_string_lossy().to_string();
if path_string.is_empty() {
return Ok(current_dir);
}
        if path_string.starts_with('/') {
return Ok(path_string);
}
if current_dir == "/" {
return Ok(format!("/{}", path_string));
}
Ok(format!("{}/{}", current_dir, path_string))
}
pub fn absolute_path_to_string(&self) -> Result<String, Error> {
Ok(strip_service_suffix(self.absolute_path()?))
}
pub fn query_path(&self) -> Vec<String> {
let mut path = vec![];
if !self.parts.is_empty() {
path = self.parts.clone();
path.pop(); // Remove the last one given that |hierarchy.name| is that one.
path.insert(0, "root".to_string()); // Parts won't contain "root"
}
path
}
}
impl FromStr for InspectLocation {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let parts = s.split("#").collect::<Vec<&str>>();
if parts.len() > 2 {
return Err(format_err!("Path contains more than one #"));
}
let mut inspect_parts = vec![];
if parts.len() == 2 {
inspect_parts.extend(parts[1].split("/"));
}
// Some valid locations won't include the service name in the name and will be just the
// directory. Append the name and attempt to load that file.
let mut location = InspectLocation::try_from(PathBuf::from(parts[0]))
.or_else(|_| {
let mut path = PathBuf::from(parts[0]);
path.push(InspectMarker::SERVICE_NAME);
InspectLocation::try_from(path)
})
.or_else(|_| {
let mut path = PathBuf::from(parts[0]);
path.push(TreeMarker::SERVICE_NAME);
InspectLocation::try_from(path)
})?;
location.parts = inspect_parts.into_iter().map(|p| p.to_string()).collect();
Ok(location)
}
}
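// A sketch of the accepted string syntax (paths are illustrative only):
//
//     "/hub/c/test.cmx/123/out/diagnostics/root.inspect#a/b"
//         -> path:  "/hub/c/test.cmx/123/out/diagnostics/root.inspect"
//            parts: ["a", "b"]
//
// When the path does not name a service file directly, the deprecated Inspect
// service name and then the Tree service name are appended and tried in that
// order.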
fn strip_service_suffix(string: String) -> String {
string
.replace(&format!("/{}", InspectMarker::SERVICE_NAME), "")
.replace(&format!("/{}", TreeMarker::SERVICE_NAME), "")
}
impl ToString for InspectLocation {
fn to_string(&self) -> String {
strip_service_suffix(self.path.to_string_lossy().to_string())
}
}
impl TryFrom<PathBuf> for InspectLocation {
type Error = anyhow::Error;
fn try_from(path: PathBuf) -> Result<Self, Self::Error> {
match path.file_name() {
None => return Err(format_err!("Failed to get filename")),
Some(filename) => {
if filename == InspectMarker::SERVICE_NAME && path.exists() {
Ok(InspectLocation {
inspect_type: InspectType::DeprecatedFidl,
path,
parts: vec![],
})
} else if filename == TreeMarker::SERVICE_NAME && path.exists() {
Ok(InspectLocation { inspect_type: InspectType::Tree, path, parts: vec![] })
} else if filename.to_string_lossy().ends_with(".inspect") {
Ok(InspectLocation { inspect_type: InspectType::Vmo, path, parts: vec![] })
} else {
return Err(format_err!("Not an inspect file"));
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn query_path() {
let location = InspectLocation {
inspect_type: InspectType::Vmo,
path: PathBuf::from("/hub/c/test.cmx/123/out/diagnostics"),
parts: vec!["a".to_string(), "b".to_string()],
};
assert_eq!(location.query_path(), vec!["root".to_string(), "a".to_string()]);
}
}
| 34.10396 | 100 | 0.579184 |
0819830412de40c74e8c834b528a5385ad89f8ae | 421 | use bellperson::bls::Fr;
use ff::Field;
use storage_proofs_core::hasher::Domain;
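// `encode` and `decode` are inverses of one another: decode(key, encode(key, v)) == v,
// since one adds and the other subtracts the same key in the field Fr.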
pub fn encode<T: Domain>(key: T, value: T) -> T {
let mut result: Fr = value.into();
let key: Fr = key.into();
result.add_assign(&key);
result.into()
}
pub fn decode<T: Domain>(key: T, value: T) -> T {
let mut result: Fr = value.into();
let key: Fr = key.into();
result.sub_assign(&key);
result.into()
}
| 21.05 | 49 | 0.603325 |
e67322561ac730ea03b505d8d07f57ccac17f53f | 1,322 | #[cfg(test)]
mod tests {
use std::path::PathBuf;
use asset_model::config::{AssetRecord, AssetSlugBuilder};
use pretty_assertions::assert_eq;
use asset_loading::AssetIndexingUtils;
#[test]
fn returns_asset_record_when_namespace_and_path_valid() {
let path = PathBuf::from("my/asset");
// kcov-ignore-start
assert_eq!(
// kcov-ignore-end
Some(asset_record("user1", "asset", path.clone())),
AssetIndexingUtils::asset_record("user1".to_string(), path)
);
}
#[test]
fn returns_none_when_namespace_invalid() {
assert_eq!(
None,
AssetIndexingUtils::asset_record(" invalid".to_string(), PathBuf::from("my/asset"))
);
}
#[test]
fn returns_none_when_path_invalid() {
assert_eq!(
None,
AssetIndexingUtils::asset_record("user1".to_string(), PathBuf::from("/"))
);
}
fn asset_record(namespace: &str, name: &str, path: PathBuf) -> AssetRecord {
AssetRecord {
asset_slug: AssetSlugBuilder::default()
.namespace(namespace.to_string())
.name(name.to_string())
.build()
.expect("Failed to build asset slug."),
path,
}
}
}
| 27.541667 | 95 | 0.56354 |
ace2e677aca728ba74e08ed3148915f6d36647bf | 3,440 | use crate::{config, err::LauncherError};
use config::objects::{Match, MatchKind};
use regex::Regex;
use std::process::Command;
pub(crate) fn launch_browser(url: &str) -> Result<(), LauncherError> {
let config_root = match config::read_config() {
Err(err_msg) => {
let msg = "Unable to load config.toml";
log::error!("{} Err: {}", msg, err_msg);
panic!("{}", msg);
}
Ok(r) => r,
};
let mut browsers: Vec<_> = config_root.browser.iter().collect();
browsers.sort_by(|a, b| a.1.priority.cmp(&b.1.priority));
browsers.reverse();
for (_, browser) in browsers {
if is_match(url, &browser.matching) {
let browser_exe = browser.path.replacen("{url}", url, 1);
let args: Vec<_> = browser
.args
.iter()
.map(|a| a.replacen("{url}", url, 1))
.collect();
execute(
&browser_exe,
&args.iter().map(|s| s.as_ref()).collect::<Vec<&str>>()[..],
);
break;
}
};
Ok(())
}
fn is_match(url: &str, matches: &[Match]) -> bool {
let mut res = false;
for m in matches {
res = match m.kind {
MatchKind::SimpleMatch => match_simple(url, &m.pattern),
MatchKind::Regex => match_regex(url, &m.pattern),
};
if res {
break;
}
}
res
}
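// `match_simple` compiles a glob-like pattern into an anchored regex. For
// example (illustrative): the pattern "*.example.com" becomes
// "^(https?|ftp)://.*\.example\.com.*" before being handed to `match_regex`.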
fn match_simple(subject: &str, pattern: &str) -> bool {
let pattern = format!(
"^(https?|ftp)://{}.*",
pattern
.to_owned()
.replacen("+", "\\+", 99)
.replacen("-", "\\-", 99)
.replacen("?", "\\?", 99)
.replacen(".", "\\.", 99)
.replacen("*", ".*", 99)
);
match_regex(subject, &pattern)
}
fn match_regex(subject: &str, pattern: &str) -> bool {
let rx = Regex::new(pattern).expect("Regex failed to compile.");
rx.is_match(subject)
}
fn execute(exe: &str, args: &[&str]) {
if let Err(msg) = Command::new(exe).args(args).spawn() {
log::error!("failed to spawn process {}", msg)
};
}
#[cfg(test)]
mod test {
#[test]
fn test_patterns_simple() {
assert!(super::match_simple("https://www.example.com", "*"));
assert!(super::match_simple(
"https://www.example.com",
"*.example.com"
));
assert!(super::match_simple(
"http://www.example.com",
"*.example.com"
));
assert!(!super::match_simple(
"http://www.example.com",
"example.com"
));
assert!(super::match_simple("http://example.com", "example.com"));
}
#[test]
    fn test_patterns_parameter() {
assert!(super::match_simple(
"http://store.steampowered.com/app/203770/Crusader_Kings_II/",
"*.steampowered.com"
));
assert!(!super::match_simple(
"http://store.steampowered.com/app/203770/Crusader_Kings_II/",
"*.steamcommunity.com"
));
assert!(super::match_simple(
"https://steamcommunity.com/sharedfiles/filedetails/?id=1526918750",
"steamcommunity.com"
));
assert!(super::match_simple(
"https://steamcommunity.com/sharedfiles/filedetails/?id=1526918750&asd=kas1235",
"steamcommunity.com"
));
}
}
| 27.52 | 92 | 0.506686 |
d543d685daaf95515d310da2cea3ea9d2fa8ecd0 | 10,865 | #[doc = "Register `TSR` reader"]
pub struct R(crate::R<TSR_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<TSR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<TSR_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<TSR_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `TSR` writer"]
pub struct W(crate::W<TSR_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<TSR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<TSR_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<TSR_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `UBR` reader - Used Bit Read"]
pub struct UBR_R(crate::FieldReader<bool, bool>);
impl UBR_R {
pub(crate) fn new(bits: bool) -> Self {
UBR_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for UBR_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `UBR` writer - Used Bit Read"]
pub struct UBR_W<'a> {
w: &'a mut W,
}
impl<'a> UBR_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01);
self.w
}
}
#[doc = "Field `COL` reader - Collision Occurred"]
pub struct COL_R(crate::FieldReader<bool, bool>);
impl COL_R {
pub(crate) fn new(bits: bool) -> Self {
COL_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for COL_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `COL` writer - Collision Occurred"]
pub struct COL_W<'a> {
w: &'a mut W,
}
impl<'a> COL_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1);
self.w
}
}
#[doc = "Field `RLE` reader - Retry Limit Exceeded"]
pub struct RLE_R(crate::FieldReader<bool, bool>);
impl RLE_R {
pub(crate) fn new(bits: bool) -> Self {
RLE_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for RLE_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RLE` writer - Retry Limit Exceeded"]
pub struct RLE_W<'a> {
w: &'a mut W,
}
impl<'a> RLE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2);
self.w
}
}
#[doc = "Field `TXGO` reader - Transmit Go"]
pub struct TXGO_R(crate::FieldReader<bool, bool>);
impl TXGO_R {
pub(crate) fn new(bits: bool) -> Self {
TXGO_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for TXGO_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `TXGO` writer - Transmit Go"]
pub struct TXGO_W<'a> {
w: &'a mut W,
}
impl<'a> TXGO_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u32 & 0x01) << 3);
self.w
}
}
#[doc = "Field `TFC` reader - Transmit Frame Corruption Due to AHB Error"]
pub struct TFC_R(crate::FieldReader<bool, bool>);
impl TFC_R {
pub(crate) fn new(bits: bool) -> Self {
TFC_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for TFC_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `TFC` writer - Transmit Frame Corruption Due to AHB Error"]
pub struct TFC_W<'a> {
w: &'a mut W,
}
impl<'a> TFC_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u32 & 0x01) << 4);
self.w
}
}
#[doc = "Field `TXCOMP` reader - Transmit Complete"]
pub struct TXCOMP_R(crate::FieldReader<bool, bool>);
impl TXCOMP_R {
pub(crate) fn new(bits: bool) -> Self {
TXCOMP_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for TXCOMP_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `TXCOMP` writer - Transmit Complete"]
pub struct TXCOMP_W<'a> {
w: &'a mut W,
}
impl<'a> TXCOMP_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 5)) | ((value as u32 & 0x01) << 5);
self.w
}
}
#[doc = "Field `HRESP` reader - HRESP Not OK"]
pub struct HRESP_R(crate::FieldReader<bool, bool>);
impl HRESP_R {
pub(crate) fn new(bits: bool) -> Self {
HRESP_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for HRESP_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `HRESP` writer - HRESP Not OK"]
pub struct HRESP_W<'a> {
w: &'a mut W,
}
impl<'a> HRESP_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 8)) | ((value as u32 & 0x01) << 8);
self.w
}
}
impl R {
#[doc = "Bit 0 - Used Bit Read"]
#[inline(always)]
pub fn ubr(&self) -> UBR_R {
UBR_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 1 - Collision Occurred"]
#[inline(always)]
pub fn col(&self) -> COL_R {
COL_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 2 - Retry Limit Exceeded"]
#[inline(always)]
pub fn rle(&self) -> RLE_R {
RLE_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 3 - Transmit Go"]
#[inline(always)]
pub fn txgo(&self) -> TXGO_R {
TXGO_R::new(((self.bits >> 3) & 0x01) != 0)
}
#[doc = "Bit 4 - Transmit Frame Corruption Due to AHB Error"]
#[inline(always)]
pub fn tfc(&self) -> TFC_R {
TFC_R::new(((self.bits >> 4) & 0x01) != 0)
}
#[doc = "Bit 5 - Transmit Complete"]
#[inline(always)]
pub fn txcomp(&self) -> TXCOMP_R {
TXCOMP_R::new(((self.bits >> 5) & 0x01) != 0)
}
#[doc = "Bit 8 - HRESP Not OK"]
#[inline(always)]
pub fn hresp(&self) -> HRESP_R {
HRESP_R::new(((self.bits >> 8) & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - Used Bit Read"]
#[inline(always)]
pub fn ubr(&mut self) -> UBR_W {
UBR_W { w: self }
}
#[doc = "Bit 1 - Collision Occurred"]
#[inline(always)]
pub fn col(&mut self) -> COL_W {
COL_W { w: self }
}
#[doc = "Bit 2 - Retry Limit Exceeded"]
#[inline(always)]
pub fn rle(&mut self) -> RLE_W {
RLE_W { w: self }
}
#[doc = "Bit 3 - Transmit Go"]
#[inline(always)]
pub fn txgo(&mut self) -> TXGO_W {
TXGO_W { w: self }
}
#[doc = "Bit 4 - Transmit Frame Corruption Due to AHB Error"]
#[inline(always)]
pub fn tfc(&mut self) -> TFC_W {
TFC_W { w: self }
}
#[doc = "Bit 5 - Transmit Complete"]
#[inline(always)]
pub fn txcomp(&mut self) -> TXCOMP_W {
TXCOMP_W { w: self }
}
#[doc = "Bit 8 - HRESP Not OK"]
#[inline(always)]
pub fn hresp(&mut self) -> HRESP_W {
HRESP_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Transmit Status Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [tsr](index.html) module"]
pub struct TSR_SPEC;
impl crate::RegisterSpec for TSR_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [tsr::R](R) reader structure"]
impl crate::Readable for TSR_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [tsr::W](W) writer structure"]
impl crate::Writable for TSR_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets TSR to value 0"]
impl crate::Resettable for TSR_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
| 27.930591 | 408 | 0.550943 |
7a30181a79ff01c9ea0f3f3d3291b91b3d45a67b | 11,135 | use orbtk::{
prelude::*,
theme_default::{THEME_DEFAULT, THEME_DEFAULT_COLORS_DARK, THEME_DEFAULT_FONTS},
theming::config::ThemeConfig,
};
static DARK_EXT: &str = include_str!("../assets/calculator/calculator_dark.ron");
fn theme() -> Theme {
register_default_fonts(Theme::from_config(
ThemeConfig::from(DARK_EXT)
.extend(ThemeConfig::from(THEME_DEFAULT))
.extend(ThemeConfig::from(THEME_DEFAULT_COLORS_DARK))
.extend(ThemeConfig::from(THEME_DEFAULT_FONTS)),
))
}
#[derive(Debug, Copy, Clone)]
enum Action {
Digit(char),
Operator(char),
}
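// Flow of a button press: the `on_click` handler stores an `Action` on the
// view state; OrbTk then runs `State::update`, which consumes the pending
// action, adjusts the input buffer and operands, and writes the result back
// into the widget tree.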
#[derive(Default, AsAny)]
pub struct MainViewState {
input: String,
operator: Option<char>,
left_side: Option<f64>,
right_side: Option<f64>,
action: Option<Action>,
}
impl MainViewState {
fn action(&mut self, action: impl Into<Option<Action>>) {
self.action = action.into();
}
fn calculate(&mut self, ctx: &mut Context) {
let mut result = 0.0;
if let Some(operator) = self.operator {
if let Some(left_side) = self.left_side {
if let Some(right_side) = self.right_side {
match operator {
'+' => {
result = left_side + right_side;
}
'-' => {
result = left_side - right_side;
}
'*' => {
result = left_side * right_side;
}
'/' => {
result = left_side / right_side;
}
_ => {}
}
}
}
}
if result % 1.0 == 0.0 {
MainView::text_set(&mut ctx.widget(), format!("{}", result));
} else {
MainView::text_set(&mut ctx.widget(), format!("{:.8}", result));
}
self.left_side = Some(result);
self.right_side = None;
}
}
impl State for MainViewState {
fn update(&mut self, _: &mut Registry, ctx: &mut Context) {
if let Some(action) = self.action {
match action {
Action::Digit(digit) => {
self.input.push(digit);
TextBlock::text_mut(&mut ctx.child("input")).push(digit);
}
Action::Operator(operator) => match operator {
'C' => {
self.input.clear();
self.left_side = None;
self.operator = None;
self.right_side = None;
MainView::text_mut(&mut ctx.widget()).clear();
TextBlock::text_mut(&mut ctx.child("input")).clear();
}
'=' => {
self.right_side = Some(self.input.parse().unwrap_or(0.));
self.calculate(ctx);
self.input.clear();
self.left_side = None;
self.operator = None;
self.right_side = None;
TextBlock::text_mut(&mut ctx.child("input")).clear();
}
_ => {
if self.input.is_empty() {
return;
}
if self.left_side.is_none() {
self.left_side = Some(self.input.parse().unwrap_or(0.));
} else {
self.right_side = Some(self.input.parse().unwrap_or(0.));
self.calculate(ctx);
}
TextBlock::text_mut(&mut ctx.child("input")).push(operator);
self.input.clear();
self.operator = Some(operator);
}
},
}
self.action = None;
}
}
}
fn generate_digit_button(
ctx: &mut BuildContext,
id: Entity,
sight: char,
primary: bool,
column: usize,
column_span: usize,
row: usize,
) -> Entity {
let style = if primary {
"button_calculator_primary"
} else {
"button_calculator"
};
let button = Button::new()
.style(style)
.min_size(48.0, 48)
.text(sight.to_string())
.on_click(move |states, _| -> bool {
state(id, states).action(Action::Digit(sight));
true
})
.attach(Grid::column(column))
.attach(Grid::row(row))
.attach(Grid::column_span(column_span));
button.build(ctx)
}
fn generate_operation_button(
ctx: &mut BuildContext,
id: Entity,
sight: char,
primary: bool,
column: usize,
column_span: usize,
row: usize,
) -> Entity {
let style = if primary {
"button_calculator_primary"
} else {
"button_calculator"
};
let button = Button::new()
.style(style)
.min_size(48.0, 48)
.text(sight.to_string())
.on_click(move |states, _| -> bool {
state(id, states).action(Action::Operator(sight));
true
})
.attach(Grid::column(column))
.attach(Grid::column_span(column_span))
.attach(Grid::row(row));
button.build(ctx)
}
widget!(MainView<MainViewState> {
text: String
});
impl Template for MainView {
fn template(self, id: Entity, ctx: &mut BuildContext) -> Self {
self.name("MainView").width(212).height(336).text("").child(
Grid::new()
.rows(Rows::create().push(72).push("*"))
.child(
Container::new()
.padding(8)
.style("header_area")
.attach(Grid::row(0))
.child(
Grid::new()
.child(
ScrollViewer::new()
.mode(("custom", "disabled"))
.child(
TextBlock::new()
.width(0)
.height(14)
.text("")
.style("input")
.id("input")
.v_align("start")
.build(ctx),
)
.build(ctx),
)
.child(
TextBlock::new()
.style("result")
.text(id)
.v_align("end")
.h_align("end")
.build(ctx),
)
.build(ctx),
)
.build(ctx),
)
.child(
Container::new()
.style("content_area")
.padding(4)
.attach(Grid::row(1))
.child(
Grid::new()
.columns(
Columns::create()
.push(48)
.push(4)
.push(48)
.push(4)
.push(48)
.push(4)
.push(48),
)
.rows(
Rows::create()
.push(48)
.push(4)
.push(48)
.push(4)
.push(48)
.push(4)
.push(48)
.push(4)
.push(48),
)
// row 0
.child(generate_operation_button(ctx, id, 'C', false, 0, 5, 0))
.child(generate_operation_button(ctx, id, '/', true, 6, 3, 0))
// row 2
.child(generate_digit_button(ctx, id, '7', false, 0, 1, 2))
.child(generate_digit_button(ctx, id, '8', false, 2, 1, 2))
.child(generate_digit_button(ctx, id, '9', false, 4, 1, 2))
.child(generate_operation_button(ctx, id, '*', true, 6, 1, 2))
// row 4
.child(generate_digit_button(ctx, id, '4', false, 0, 1, 4))
.child(generate_digit_button(ctx, id, '5', false, 2, 1, 4))
.child(generate_digit_button(ctx, id, '6', false, 4, 1, 4))
.child(generate_operation_button(ctx, id, '-', true, 6, 1, 4))
// row 6
.child(generate_digit_button(ctx, id, '1', false, 0, 1, 6))
.child(generate_digit_button(ctx, id, '2', false, 2, 1, 6))
.child(generate_digit_button(ctx, id, '3', false, 4, 1, 6))
.child(generate_operation_button(ctx, id, '+', true, 6, 1, 6))
// row 8
.child(generate_digit_button(ctx, id, '0', false, 0, 3, 8))
.child(generate_digit_button(ctx, id, '.', false, 4, 1, 8))
.child(generate_operation_button(ctx, id, '=', true, 6, 1, 8))
.build(ctx),
)
.build(ctx),
)
.build(ctx),
)
}
}
fn main() {
Application::new()
.theme(theme())
.window(|ctx| {
Window::new()
.title("OrbTk - Calculator example")
.position((100, 100))
.size(212.0, 336)
.child(MainView::new().build(ctx))
.build(ctx)
})
.run();
}
// helper to request MainViewState
fn state<'a>(id: Entity, states: &'a mut StatesContext) -> &'a mut MainViewState {
states.get_mut(id)
}
| 36.870861 | 95 | 0.370903 |
79c9b13222ec7bd357fdf4f957258fc16f23a11c | 6,836 | use crate::{
curves::{
models::{ModelParameters, SWModelParameters},
PairingEngine,
},
fields::{
fp12_2over3over2::{Fp12, Fp12Parameters},
fp2::Fp2Parameters,
fp6_3over2::Fp6Parameters,
Field, Fp2, PrimeField, SquareRootField,
},
};
use derivative::Derivative;
use num_traits::One;
use std::{marker::PhantomData, ops::MulAssign};
pub mod g1;
pub mod g2;
pub trait BnParameters: 'static {
const SIX_U_PLUS_2_NAF: &'static [i8];
const U: &'static [u64];
type Fp: PrimeField + SquareRootField + Into<<Self::Fp as PrimeField>::BigInt>;
type Fp2Params: Fp2Parameters<Fp = Self::Fp>;
type Fp6Params: Fp6Parameters<Fp2Params = Self::Fp2Params>;
type Fp12Params: Fp12Parameters<Fp6Params = Self::Fp6Params>;
type G1Parameters: SWModelParameters<BaseField = Self::Fp>;
type G2Parameters: SWModelParameters<
BaseField = Fp2<Self::Fp2Params>,
ScalarField = <Self::G1Parameters as ModelParameters>::ScalarField,
>;
const CUBIC_NONRESIDUE_TO_Q_MINUS_1_OVER_2: Fp2<Self::Fp2Params>;
}
#[derive(Derivative)]
#[derivative(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub struct Bn<P: BnParameters>(PhantomData<fn() -> P>);
pub use self::{
g1::{G1Affine, G1Prepared, G1Projective},
g2::{G2Affine, G2Prepared, G2Projective},
};
impl<P: BnParameters> Bn<P> {
// Final steps of the line function on prepared coefficients
fn ell(
f: &mut Fp12<P::Fp12Params>,
coeffs: &(Fp2<P::Fp2Params>, Fp2<P::Fp2Params>, Fp2<P::Fp2Params>),
p: &G1Affine<P>,
) {
let mut c0 = coeffs.0;
let mut c1 = coeffs.1;
c0.c0.mul_assign(&p.y);
c0.c1.mul_assign(&p.y);
c1.c0.mul_assign(&p.x);
c1.c1.mul_assign(&p.x);
// Sparse multiplication in Fq12
f.mul_by_034(&c0, &c1, &coeffs.2);
}
fn exp_by_x(f: &mut Fp12<P::Fp12Params>) {
*f = f.pow(&P::U);
}
}
impl<P: BnParameters> PairingEngine for Bn<P>
/*
where
G1Affine<P>: PairingCurve<
BaseField = <P::G1Parameters as ModelParameters>::BaseField,
ScalarField = <P::G1Parameters as ModelParameters>::ScalarField,
Projective = G1Projective<P>,
PairWith = G2Affine<P>,
Prepared = G1Prepared<P>,
PairingResult = Fp12<P::Fp12Params>,
>,
G2Affine<P>: PairingCurve<
BaseField = <P::G2Parameters as ModelParameters>::BaseField,
ScalarField = <P::G1Parameters as ModelParameters>::ScalarField,
Projective = G2Projective<P>,
PairWith = G1Affine<P>,
Prepared = G2Prepared<P>,
PairingResult = Fp12<P::Fp12Params>,
>, */
{
type Fr = <P::G1Parameters as ModelParameters>::ScalarField;
type G1Projective = G1Projective<P>;
type G1Affine = G1Affine<P>;
type G1Prepared = G1Prepared<P>;
type G2Projective = G2Projective<P>;
type G2Affine = G2Affine<P>;
type G2Prepared = G2Prepared<P>;
type Fq = P::Fp;
type Fqe = Fp2<P::Fp2Params>;
type Fqk = Fp12<P::Fp12Params>;
fn miller_loop<'a, I>(i: I) -> Self::Fqk
where
I: IntoIterator<Item = &'a (Self::G1Prepared, Self::G2Prepared)>,
{
let mut pairs = vec![];
for (p, q) in i {
if !p.is_zero() && !q.is_zero() {
pairs.push((p, q.ell_coeffs.iter()));
}
}
let mut f = Self::Fqk::one();
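        // The loop below is driven by the NAF digits of 6u+2: each iteration
        // squares `f` (skipped on the first pass) and consumes one
        // doubling-step line via `ell`; every non-zero digit consumes one
        // extra addition-step line. The coefficients themselves were
        // precomputed in `G2Prepared`, so they are only read off in order here.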
for i in (1..P::SIX_U_PLUS_2_NAF.len()).rev() {
if i != P::SIX_U_PLUS_2_NAF.len() - 1 {
f.square_in_place();
}
for (p, ref mut coeffs) in &mut pairs {
Self::ell(&mut f, coeffs.next().unwrap(), &p.0);
}
let x = P::SIX_U_PLUS_2_NAF[i - 1];
            match x {
                1 | -1 => {
                    for (p, ref mut coeffs) in &mut pairs {
                        Self::ell(&mut f, coeffs.next().unwrap(), &p.0);
                    }
                }
                _ => continue,
            }
}
// two additional steps: for q1 and minus q2
for (p, ref mut coeffs) in &mut pairs {
Self::ell(&mut f, coeffs.next().unwrap(), &p.0);
}
for (p, ref mut coeffs) in &mut pairs {
Self::ell(&mut f, coeffs.next().unwrap(), &p.0);
}
for (_p, ref mut coeffs) in &mut pairs {
assert_eq!(coeffs.next(), None);
}
f
}
fn final_exponentiation(r: &Self::Fqk) -> Option<Self::Fqk> {
let mut f1 = *r;
f1.conjugate();
match r.inverse() {
Some(mut f2) => {
let mut r = f1;
r.mul_assign(&f2);
f2 = r;
r.frobenius_map(2);
r.mul_assign(&f2);
let mut fp = r;
fp.frobenius_map(1);
let mut fp2 = r;
fp2.frobenius_map(2);
let mut fp3 = fp2;
fp3.frobenius_map(1);
let mut fu = r;
Self::exp_by_x(&mut fu);
let mut fu2 = fu;
Self::exp_by_x(&mut fu2);
let mut fu3 = fu2;
Self::exp_by_x(&mut fu3);
let mut y3 = fu;
y3.frobenius_map(1);
let mut fu2p = fu2;
fu2p.frobenius_map(1);
let mut fu3p = fu3;
fu3p.frobenius_map(1);
let mut y2 = fu2;
y2.frobenius_map(2);
let mut y0 = fp;
y0.mul_assign(&fp2);
y0.mul_assign(&fp3);
let mut y1 = r;
y1.conjugate();
let mut y5 = fu2;
y5.conjugate();
y3.conjugate();
let mut y4 = fu;
y4.mul_assign(&fu2p);
y4.conjugate();
let mut y6 = fu3;
y6.mul_assign(&fu3p);
y6.conjugate();
y6.square_in_place();
y6.mul_assign(&y4);
y6.mul_assign(&y5);
let mut t1 = y3;
t1.mul_assign(&y5);
t1.mul_assign(&y6);
y6.mul_assign(&y2);
t1.square_in_place();
t1.mul_assign(&y6);
t1.square_in_place();
let mut t0 = t1;
t0.mul_assign(&y1);
t1.mul_assign(&y0);
t0.square_in_place();
t0.mul_assign(&t1);
Some(t0)
},
None => None,
}
}
}
| 27.788618 | 83 | 0.495173 |
267659e312b3881b73d65e161d07f83926d1945c | 4,840 | extern crate structopt;
use medic::entries::Entry;
use medic::*;
use std::path::PathBuf;
use structopt::StructOpt;
/// Medic
#[derive(StructOpt, Debug)]
#[structopt(name = "medic")]
struct Opt {
/// Give verbose output
#[structopt(short = "v", long = "verbose")]
verbose: bool,
    /// Provide a key file, if unlocking the KeePass database requires one
#[structopt(short = "k", long = "keyfile", parse(from_os_str))]
keyfile: Option<PathBuf>,
/// Check passwords against breached passwords online via the HaveIBeenPwned API. More info
/// here:
/// https://www.troyhunt.com/ive-just-launched-pwned-passwords-version-2/#cloudflareprivacyandkanonymity
#[structopt(long = "online")]
online: bool,
    /// Provide a file containing SHA-1 hashes of passwords to check the database against. To download a copy of a
    /// very large list of password hashes from HaveIBeenPwned, go to: https://haveibeenpwned.com/Passwords
#[structopt(short = "h", long = "hashfile", parse(from_os_str))]
hash_file: Option<PathBuf>,
/// Check database for duplicate passwords
#[structopt(short = "d", long = "duplicate")]
check_duplicate: bool,
/// Check database for weak passwords
#[structopt(short = "w", long = "weak")]
check_weak: bool,
/// Print results of health check to a file
#[structopt(short = "o", long = "output")]
output: Option<String>,
/// KeePass database to check. Can either be a kdbx file or an exported CSV version of a
/// KeePass database.
#[structopt(name = "KEEPASS DATABASE FILE", parse(from_os_str))]
keepass_db: PathBuf,
}
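// Example invocations (hypothetical file names; the binary name comes from the
// crate metadata):
//
//     medic -d -w passwords.kdbx                        # duplicate + weak checks
//     medic --hashfile pwned-hashes.txt passwords.kdbx  # offline breach check
//     medic --online passwords.kdbx                     # online check via HaveIBeenPwned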
fn main() {
let opt = Opt::from_args();
if opt.verbose {
println!("{:?}", opt);
}
let keepass_db_file_path = opt.keepass_db;
let hash_file: Option<PathBuf> = opt.hash_file;
let keyfile: Option<PathBuf> = opt.keyfile;
let check_online = opt.online;
let output_dest: Destination = match opt.output {
Some(file_path) => Destination::FilePath(file_path),
None => Destination::Terminal,
};
match &output_dest {
Destination::FilePath(file_path) => {
create_file(&Destination::FilePath(file_path.to_string()))
.expect("Couldn't write to file");
}
Destination::Terminal => (),
}
    if hash_file.is_none() && !check_online && !opt.check_duplicate && !opt.check_weak {
        println!("Whoops! I have nothing to check against");
println!("You must either:\n1. Provide a file with hashes of passwords to check against \nOR\n2. Use the --online flag to check your passwords online via HaveIBeenPwned API\nOR\n3. Use one or both of -d or -w flags to check for duplicate and/or weak passwords");
println!("Run --help for more information");
return;
}
let entries: Vec<Entry> = match get_entries(keepass_db_file_path, keyfile) {
Some(entries) => entries,
None => panic!("Didn't find any entries in provided KeePass database"),
};
if opt.check_weak {
match check_for_and_display_weak_passwords(&entries, &output_dest) {
Ok(()) => (),
Err(e) => panic!("Error checking for weak passwords!: {}", e),
}
}
if opt.check_duplicate {
let digest_map = match make_digest_map(&entries) {
Ok(map) => map,
Err(e) => panic!("Failed to check for duplicate passwords: {}", e),
};
present_duplicated_entries(digest_map, &output_dest)
.expect("Error presenting duplicate passwords");
}
if let Some(hash_file) = hash_file {
println!("Checking KeePass database against provided file of hashed passwords");
let breached_entries =
match check_database_offline(hash_file, &entries, VisibilityPreference::Show) {
Ok(breached_entries) => breached_entries,
Err(e) => panic!("Error checking database offline: {}", e),
};
present_breached_entries(&breached_entries, &output_dest)
.expect("Error presenting breached entries");
}
if check_online {
println!(
"\nAre you sure you want to check the KeePass database against HaveIBeenPwned API? (y/N)"
);
match gets() {
Ok(answer) => {
if answer == "y" {
let breached_entries = match check_database_online(&entries) {
Ok(breached_entries) => breached_entries,
Err(e) => panic!("Error: {}", e),
};
present_breached_entries(&breached_entries, &output_dest)
.expect("Error presenting breached errors");
}
}
Err(e) => eprintln!("Error reading your answer: {}", e),
}
}
}
| 39.032258 | 270 | 0.61405 |
6a75e6e5ed56c4a4dcbe163e1bcaf829940759c5 | 11,872 | mod error;
use crate::error::InternalError;
use clap::Parser;
use std::fs::{self, File};
use std::io::ErrorKind;
use std::path::Path;
use std::process::{Command, Stdio};
/// This plugin supports the installation, update and removal of a single unversioned apama project named "project".
/// Installation of multiple parallel projects is not supported.
/// Installing a project will replace the existing project with the new one.
/// Delta update of a project (e.g. updating just the `mon` file definitions in the project) is not supported either.
#[derive(Parser)]
struct ApamaCli {
#[clap(subcommand)]
operation: PluginOp,
}
#[derive(clap::Subcommand)]
pub enum PluginOp {
/// List the one and only apama project if one is installed
List,
/// Install an apama project
Install {
module: String,
#[clap(short = 'v', long = "--module-version")]
version: Option<String>,
#[clap(long = "--file")]
file_path: String,
},
/// Remove an apama project
Remove {
module: String,
#[clap(short = 'v', long = "--module-version")]
version: Option<String>,
},
/// Prepare a sequences of install/remove commands
Prepare,
/// Finalize a sequences of install/remove commands
Finalize,
}
const APAMA_ENV_EXE: &str = "/opt/softwareag/Apama/bin/apama_env";
const TEDGE_APAMA_PROJECT_DIR: &str = "/etc/tedge/apama/project";
const TMP_APAMA_PROJECT_DIR: &str = "/tmp/tedge_apama_project";
const DEFAULT_APAMA_PROJECT_NAME: &str = "unnamed";
const ENGINE_INJECT_CMD: &str = "engine_inject";
const ENGINE_INSPECT_CMD: &str = "engine_inspect";
const ENGINE_DELETE_CMD: &str = "engine_delete";
const PROJECT_SUFFIX: &str = "project";
const MON_SUFFIX: &str = "mon";
enum ApamaModule {
Project(String),
MonFile(String),
}
fn run(operation: PluginOp) -> Result<(), InternalError> {
let tedge_env_exe_path = Path::new(APAMA_ENV_EXE);
let tedge_apama_project_path: &Path = Path::new(TEDGE_APAMA_PROJECT_DIR);
let tmp_apama_project_path: &Path = Path::new(TMP_APAMA_PROJECT_DIR);
if !tedge_env_exe_path.exists() {
return Err(InternalError::ApamaNotInstalled);
}
match operation {
PluginOp::List => {
if tedge_apama_project_path.exists() {
// Print the project name
println!("{}::project\t", get_project_name(tedge_apama_project_path));
// Print the installed monitors
for monitor in get_installed_monitors()? {
println!("{}::mon\t", monitor)
}
}
Ok(())
}
PluginOp::Prepare => Ok(()),
PluginOp::Finalize => {
// Cleanup any temporary artefacts created by this plugin
if tmp_apama_project_path.exists() {
fs::remove_dir_all(tmp_apama_project_path)?;
}
Ok(())
}
PluginOp::Install {
module,
version: _,
file_path,
} => match apama_module_from_string(&module)? {
ApamaModule::Project(_) => install_project(Path::new(&file_path)),
ApamaModule::MonFile(monitor_name) => {
install_or_update_monitor(&monitor_name, &file_path)
}
},
PluginOp::Remove { module, version: _ } => match apama_module_from_string(&module)? {
ApamaModule::Project(project_name) => remove_project(&project_name),
ApamaModule::MonFile(monitor_name) => remove_monitor(&monitor_name),
},
}
}
fn get_project_name(tedge_apama_project_path: &Path) -> String {
let tedge_apama_project_descriptor_path = tedge_apama_project_path.join(".project");
if tedge_apama_project_descriptor_path.exists() {
if let Ok(xml_content) = fs::read_to_string(tedge_apama_project_descriptor_path) {
if let Ok(root) = roxmltree::Document::parse(xml_content.as_str()) {
return root
.descendants()
.find(|node| node.has_tag_name("name"))
.and_then(|node| node.first_child())
.and_then(|node| node.text())
.map(str::to_string)
.unwrap_or_else(|| DEFAULT_APAMA_PROJECT_NAME.into());
}
}
}
DEFAULT_APAMA_PROJECT_NAME.into()
}
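// Module names carry their type as a `::` suffix, for example (names are
// illustrative):
//     "quickstart::project"   -> ApamaModule::Project("quickstart")
//     "TedgeDemoMonitor::mon" -> ApamaModule::MonFile("TedgeDemoMonitor")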
fn apama_module_from_string(module: &str) -> Result<ApamaModule, InternalError> {
match module.rsplit_once("::") {
Some((prefix, suffix)) => match suffix {
PROJECT_SUFFIX => Ok(ApamaModule::Project(prefix.into())),
MON_SUFFIX => Ok(ApamaModule::MonFile(prefix.into())),
unsupported_type => Err(InternalError::UnsupportedModuleType {
module_type: unsupported_type.into(),
}),
},
None => Err(InternalError::ModuleTypeNotProvided {
module_name: module.into(),
}),
}
}
fn install_project(project_archive_path: &Path) -> Result<(), InternalError> {
let tedge_apama_project_path: &Path = Path::new(TEDGE_APAMA_PROJECT_DIR);
let tmp_apama_project_path: &Path = Path::new(TMP_APAMA_PROJECT_DIR);
let archive_file = File::open(&project_archive_path)?;
let mut archive = zip::ZipArchive::new(archive_file)?;
if let Err(zip::result::ZipError::FileNotFound) = archive.by_name("project/") {
return Err(InternalError::InvalidProjectArchive);
}
println!("Extracting the archive at {:?}", project_archive_path);
archive.extract(tmp_apama_project_path)?;
println!("Extraction successful");
// Deleting existing project as the rename API expects the target dir to be empty
delete_project()?;
println!(
"Installing newly extracted project to {}",
TEDGE_APAMA_PROJECT_DIR
);
fs::create_dir_all(tedge_apama_project_path)?;
fs::rename(
tmp_apama_project_path.join("project"),
tedge_apama_project_path,
)?;
println!("Installation of new project successful");
restart_apama_service()?;
wait_for_apama_correlator_ready()?;
Ok(())
}
fn restart_apama_service() -> Result<(), InternalError> {
println!("Restarting apama to load the new project");
run_cmd("service", "apama restart")?;
println!("Restart of apama service successful");
Ok(())
}
fn wait_for_apama_correlator_ready() -> Result<(), InternalError> {
    println!("Waiting for apama correlator to be ready for up to 10 seconds");
run_cmd(APAMA_ENV_EXE, "engine_management --waitFor 10")?;
    println!("Apama correlator is ready");
Ok(())
}
fn remove_project(_project_name: &str) -> Result<(), InternalError> {
let tedge_apama_project_path: &Path = Path::new(TEDGE_APAMA_PROJECT_DIR);
if tedge_apama_project_path.exists() {
stop_apama_service()?;
delete_project()?;
} else {
println!("Doing nothing as there's no project installed");
}
Ok(())
}
fn delete_project() -> Result<(), InternalError> {
println!("Removing existing project at {}", TEDGE_APAMA_PROJECT_DIR);
let result = fs::remove_dir_all(TEDGE_APAMA_PROJECT_DIR);
if let Err(err) = result {
if err.kind() != ErrorKind::NotFound {
return Err(InternalError::from(err));
}
}
println!("Removal of existing project successful");
Ok(())
}
fn stop_apama_service() -> Result<(), InternalError> {
println!("Stopping apama service");
run_cmd("service", "apama stop")?;
println!("Stopping apama service successful");
Ok(())
}
fn get_installed_monitors() -> Result<Vec<String>, InternalError> {
// Run `engine_inspect -m -r` command to list all monitors in raw format
let output = Command::new(APAMA_ENV_EXE)
.arg(ENGINE_INSPECT_CMD)
.arg("-m")
.arg("-r")
.stdin(Stdio::null())
.output()
.map_err(|err| InternalError::exec_error(ENGINE_INSPECT_CMD, err))?;
let output = String::from_utf8(output.stdout)?;
// The output contains monitor names and their instance counts separated by a space as follows:
// ```
// TedgeDemoMonitor 1
// TedgeTestMonitor 1
// ```
let mon_files = output
.lines()
        // The first line of the output could be "WARNING: JAVA_HOME not set", which is filtered out
.filter(|line| !line.starts_with("WARNING:"))
// The counts are filtered out too
.filter_map(|line| line.split_whitespace().next())
.map(|line| line.into())
.collect();
Ok(mon_files)
}
fn install_or_update_monitor(mon_name: &str, mon_file_path: &str) -> Result<(), InternalError> {
let installed_monitors = get_installed_monitors()?;
    // If an existing monitor needs to be updated, the older version needs to be
    // removed before the new one is installed
    if installed_monitors.contains(&mon_name.to_string()) {
        remove_monitor(mon_name)?;
    }
    install_monitor(mon_file_path)
}
fn install_monitor(mon_file_path: &str) -> Result<(), InternalError> {
run_cmd(
APAMA_ENV_EXE,
format!("{} {}", ENGINE_INJECT_CMD, mon_file_path).as_str(),
)
}
fn remove_monitor(mon_name: &str) -> Result<(), InternalError> {
run_cmd(
APAMA_ENV_EXE,
format!("{} {}", ENGINE_DELETE_CMD, mon_name).as_str(),
)
}
fn run_cmd(cmd: &str, args: &str) -> Result<(), InternalError> {
let args: Vec<&str> = args.split_whitespace().collect();
let exit_status = Command::new(cmd)
.args(args)
.stdin(Stdio::null())
.status()
.map_err(|err| InternalError::exec_error(cmd, err))?;
if exit_status.success() {
Ok(())
} else {
Err(InternalError::ExecFailure {
cmd: cmd.into(),
exit_status,
})
}
}
fn main() {
// On usage error, the process exits with a status code of 1
let apama = ApamaCli::parse();
match run(apama.operation) {
Ok(()) => {
std::process::exit(0);
}
Err(err) => {
eprintln!("ERROR: {}", err);
std::process::exit(2);
}
}
}
#[cfg(test)]
mod tests {
use std::fs::{self, File};
use tempfile::TempDir;
use crate::get_project_name;
#[test]
fn get_project_name_project_descriptor_xml() {
let temp_dir = TempDir::new().unwrap();
let project_dir_path = temp_dir.path();
let project_descriptor_path = project_dir_path.join(".project");
fs::write(
project_descriptor_path.as_path(),
r#"<projectDescription><name>quickstart-project</name></projectDescription>"#,
)
.expect("Failed to create project descriptor xml file");
assert_eq!(get_project_name(project_dir_path), "quickstart-project");
}
#[test]
fn get_project_name_empty_project() {
let temp_dir = TempDir::new().unwrap();
let project_dir_path = temp_dir.path();
let project_descriptor_path = project_dir_path.join(".project");
File::create(project_descriptor_path.as_path())
.expect("Failed to create empty project descriptor xml file");
assert_eq!(get_project_name(temp_dir.path()), "unnamed");
}
#[test]
fn get_project_name_empty_project_descriptor() {
let temp_dir = TempDir::new().unwrap();
assert_eq!(get_project_name(temp_dir.path()), "unnamed");
}
#[test]
fn get_project_name_invalid_project_descriptor() {
let temp_dir = TempDir::new().unwrap();
let project_dir_path = temp_dir.path();
let project_descriptor_path = project_dir_path.join(".project");
fs::write(project_descriptor_path.clone(), "not an xml").unwrap();
assert_eq!(get_project_name(project_dir_path), "unnamed");
}
}
| 32.348774 | 120 | 0.628959 |
ab9ec2801fb7d5f5753a6b60076f98dc89b8a1e4 | 7,206 | use crate::paths::{binst_bin_dir, binst_tmp_dir, os_target};
use crate::utils::{clean_path, sym_link};
use clap::ArgMatches;
use regex::Regex;
use semver::Version;
use std::fs::{create_dir_all, remove_file, File};
use std::io::Write;
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};
use self::error::BinRepoError;
mod aws_provider;
pub mod error;
mod install;
mod publish;
pub const BINST_REPO_URL: &'static str = "https://repo.binst.io/";
pub const BINST_REPO_BUCKET: &'static str = "binst-repo";
pub const BINST_REPO_AWS_PROFILE: &'static str = "binst-repo-user";
// env names for the binst repo
pub const ENV_BINST_REPO_AWS_KEY_ID: &'static str = "BINST_REPO_AWS_KEY_ID";
pub const ENV_BINST_REPO_AWS_KEY_SECRET: &'static str = "BINST_REPO_AWS_KEY_SECRET";
pub const ENV_BINST_REPO_AWS_REGION: &'static str = "BINST_REPO_AWS_REGION";
pub const MAIN_STREAM: &str = "main";
#[derive(Debug)]
enum RepoInfo {
// local path dir
Local(String),
// S3, only support via profile for now
S3(S3Info),
// http/https, only for install
Http(String),
}
impl RepoInfo {
fn url(&self) -> &str {
match self {
RepoInfo::Local(url) => url,
RepoInfo::S3(s3_info) => &s3_info.url,
RepoInfo::Http(url) => url,
}
}
}
/// Builders
impl RepoInfo {
fn binst_publish_repo() -> RepoInfo {
let s3_info = S3Info {
url: format!("s3://{}", BINST_REPO_BUCKET),
bucket: BINST_REPO_BUCKET.to_string(),
base: "".to_string(),
profile: Some(BINST_REPO_AWS_PROFILE.to_string()),
};
RepoInfo::S3(s3_info)
}
fn binst_install_repo() -> RepoInfo {
RepoInfo::Http(clean_path(BINST_REPO_URL))
}
fn from_repo_string(repo: &str, profile: Option<&str>) -> Result<RepoInfo, BinRepoError> {
let repo_info = if repo.starts_with("s3://") {
RepoInfo::S3(S3Info::from_s3_url(repo, profile)?)
} else if repo.starts_with("http://") || repo.starts_with("https://") {
RepoInfo::Http(clean_path(repo))
} else {
RepoInfo::Local(clean_path(repo))
};
Ok(repo_info)
}
}
#[derive(Debug)]
pub struct S3Info {
url: String,
bucket: String,
base: String,
profile: Option<String>,
}
impl S3Info {
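    // Splits an s3 URL into its bucket and base path, for example (URLs are
    // illustrative):
    //     "s3://my-bucket"          -> bucket: "my-bucket", base: ""
    //     "s3://my-bucket/some/dir" -> bucket: "my-bucket", base: "some/dir"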
pub fn from_s3_url(s3_url: &str, profile: Option<&str>) -> Result<S3Info, BinRepoError> {
let repo_path = &s3_url[5..];
let mut parts = repo_path.splitn(2, '/');
let bucket = match parts.next() {
Some(bucket) => {
if bucket.len() == 0 {
return Err(BinRepoError::RepoInvalidS3(s3_url.to_owned()));
}
bucket
}
None => return Err(BinRepoError::RepoInvalidS3(s3_url.to_owned())),
}
.to_owned();
let base = match parts.next() {
Some(base) => {
if base.starts_with("/") {
return Err(BinRepoError::RepoInvalidS3(s3_url.to_owned()));
}
base
}
None => "", // empty string for empty base path
}
.to_owned();
let profile = profile.map(|v| v.to_owned());
let url = s3_url.to_string();
Ok(S3Info {
url,
bucket,
base,
profile,
})
}
}
#[derive(Debug)]
pub struct BinRepo {
bin_name: String,
install_repo: RepoInfo,
publish_repo: RepoInfo,
target: Option<String>,
}
// repo builder function(s) and common methods
impl BinRepo {
pub fn new(bin_name: &str, argc: &ArgMatches, publish: bool) -> Result<Self, BinRepoError> {
let bin_name = bin_name.to_string();
let target = if publish {
argc.value_of("target").map(|target| target.to_string())
} else {
None
};
// build the RepoInfo
let argc_profile = argc.value_of("profile");
let argc_repo = argc.value_of("repo");
let (install_repo, publish_repo) = if let Some(repo) = argc_repo {
let install_repo = RepoInfo::from_repo_string(repo, argc_profile)?;
let publish_repo = RepoInfo::from_repo_string(repo, argc_profile)?;
(install_repo, publish_repo)
} else {
(RepoInfo::binst_install_repo(), RepoInfo::binst_publish_repo())
};
Ok(BinRepo {
bin_name: bin_name.to_owned(),
install_repo,
publish_repo,
target,
})
}
fn origin_bin_target_uri(&self, stream_or_path: &str) -> String {
let target = self.target.as_ref().map(|s| s.to_string()).unwrap_or(os_target());
format!("{}/{}/{}", self.bin_name, target, stream_or_path)
}
}
// region: BinRepo path function helpers
fn make_bin_temp_dir(bin_name: &str) -> Result<PathBuf, BinRepoError> {
let start = SystemTime::now().duration_since(UNIX_EPOCH).expect("time anomaly?").as_millis();
let path = binst_tmp_dir(Some(&format!("{}-{}", bin_name, start)))?;
Ok(path)
}
fn get_release_bin(name: &str, target: &Option<String>) -> Result<PathBuf, BinRepoError> {
// Note this is to support cross compilation (x86_64-apple-darwin on arm64)
let bin_file = if let Some(target) = target {
Path::new("./target").join(target).join("release").join(name)
} else {
Path::new("./target/release").join(name)
};
match bin_file.is_file() {
true => Ok(bin_file),
false => Err(BinRepoError::NoReleaseBinFile),
}
}
pub fn extract_stream(version: &Version) -> String {
if version.pre.len() > 0 {
let pre = version.pre.as_str();
let rx = Regex::new("[a-zA-Z-]+").unwrap(); // can't fail if it worked once
        let stream = rx.find(&pre).map(|m| m.as_str()).unwrap_or("pre");
let stream = if stream.ends_with("-") {
&stream[..stream.len() - 1]
} else {
stream
};
stream.to_owned()
} else {
MAIN_STREAM.to_string()
}
}
// endregion: BinRepo path function helpers
// region: Self/Install/Update helpers
/// Returns the version path part.
pub fn get_version_part(version: &Version) -> String {
    version.to_string()
}
pub fn create_bin_symlink(bin_name: &str, unpacked_bin: &PathBuf) -> Result<PathBuf, BinRepoError> {
// make sure the .binst/bin/ directory exists
let bin_dir = binst_bin_dir();
if !bin_dir.is_dir() {
create_dir_all(&bin_dir)?;
}
if !unpacked_bin.is_file() {
return Err(BinRepoError::UnpackedBinFileNotFound(unpacked_bin.to_string_lossy().to_string()));
}
let bin_symlink_path = binst_bin_dir().join(bin_name);
if bin_symlink_path.is_file() {
remove_file(&bin_symlink_path)?;
}
sym_link(&unpacked_bin, &bin_symlink_path)?;
Ok(bin_symlink_path)
}
pub fn create_install_toml(package_dir: &PathBuf, repo: &str, stream: &str, version: &Version) -> Result<(), BinRepoError> {
let install_content = create_install_toml_content(repo, stream, version);
let install_path = package_dir.join("install.toml");
File::create(&install_path)?.write_all(install_content.as_bytes())?;
Ok(())
}
fn create_install_toml_content(repo: &str, stream: &str, version: &Version) -> String {
format!(
r#"[install]
repo = "{}"
stream = "{}"
version = "{}"
"#,
repo, stream, version
)
}
// endregion: Self/Install/Update helpers
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_extract_stream() {
fn run(v: &str) -> String {
extract_stream(&Version::parse(v).unwrap())
}
assert_eq!("main", run("0.1.3"));
assert_eq!("main", run("0.1.0"));
assert_eq!("rc", run("0.1.3-rc"));
assert_eq!("rc", run("0.1.3-rc-1"));
assert_eq!("rc-big", run("0.1.3-rc-big-1"));
assert_eq!("beta", run("0.1.3-beta.2"));
assert_eq!("beta", run("0.1.3-beta2"));
assert_eq!("big-beta", run("0.1.3-big-beta2"));
assert_eq!("pre", run("0.1.3-123"));
}
}
| 26.29927 | 124 | 0.672634 |
ac46424d74a989c437ecb3ede3bcd1f061361dc7 | 9,183 | #![warn(missing_docs)]
use std::fmt;
use crossbeam::channel::{Receiver, Sender};
use futures::channel::oneshot;
use snafu::Snafu;
use crate::{
bdev::nexus::VerboseError,
core::{BlockDeviceDescriptor, CoreError, Descriptor},
nexus_uri::NexusBdevError,
};
use spdk_rs::DmaError;
use super::rebuild_impl::*;
#[derive(Debug, Snafu, Clone)]
#[snafu(visibility = "pub(crate)")]
#[allow(missing_docs)]
/// Various rebuild errors when interacting with a rebuild job or
/// encountered during a rebuild copy
pub enum RebuildError {
#[snafu(display("Failed to allocate buffer for the rebuild copy"))]
NoCopyBuffer { source: DmaError },
#[snafu(display("Failed to validate rebuild job creation parameters"))]
InvalidParameters {},
#[snafu(display("Failed to get a handle for bdev {}", bdev))]
NoBdevHandle { source: CoreError, bdev: String },
#[snafu(display("Bdev {} not found", bdev))]
BdevNotFound { source: CoreError, bdev: String },
#[snafu(display("IO failed for bdev {}", bdev))]
IoError { source: CoreError, bdev: String },
#[snafu(display("Read IO failed for bdev {}", bdev))]
ReadIoError { source: CoreError, bdev: String },
#[snafu(display("Write IO failed for bdev {}", bdev))]
WriteIoError { source: CoreError, bdev: String },
#[snafu(display("Failed to find rebuild job {}", job))]
JobNotFound { job: String },
#[snafu(display("Job {} already exists", job))]
JobAlreadyExists { job: String },
#[snafu(display("Missing rebuild destination {}", job))]
MissingDestination { job: String },
#[snafu(display(
"{} operation failed because current rebuild state is {}.",
operation,
state,
))]
OpError { operation: String, state: String },
#[snafu(display("Existing pending state {}", state,))]
StatePending { state: String },
#[snafu(display(
"Failed to lock LBA range for blk {}, len {}, with error: {}",
blk,
len,
source,
))]
RangeLockError {
blk: u64,
len: u64,
source: nix::errno::Errno,
},
#[snafu(display(
"Failed to unlock LBA range for blk {}, len {}, with error: {}",
blk,
len,
source,
))]
RangeUnLockError {
blk: u64,
len: u64,
source: nix::errno::Errno,
},
#[snafu(display("Failed to get bdev name from URI {}", uri))]
BdevInvalidUri { source: NexusBdevError, uri: String },
}
#[derive(Debug, PartialEq, Copy, Clone)]
/// allowed states for a rebuild job
pub enum RebuildState {
/// Init when the job is newly created
Init,
/// Running when the job is rebuilding
Running,
/// Stopped when the job is halted as requested through stop
/// and pending its removal
Stopped,
/// Paused when the job is paused as requested through pause
Paused,
/// Failed when an IO (R/W) operation was failed
/// there are no retries as it currently stands
Failed,
/// Completed when the rebuild was successfully completed
Completed,
}
impl fmt::Display for RebuildState {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
RebuildState::Init => write!(f, "init"),
RebuildState::Running => write!(f, "running"),
RebuildState::Stopped => write!(f, "stopped"),
RebuildState::Paused => write!(f, "paused"),
RebuildState::Failed => write!(f, "failed"),
RebuildState::Completed => write!(f, "completed"),
}
}
}
/// A rebuild job is responsible for managing a rebuild (copy) which reads
/// from the source and writes into the destination over the specified range
pub struct RebuildJob {
/// name of the nexus associated with the rebuild job
pub nexus: String,
/// descriptor for the nexus
pub(super) nexus_descriptor: Descriptor,
/// source URI of the healthy child to rebuild from
pub source: String,
/// target URI of the out of sync child in need of a rebuild
pub destination: String,
pub(super) block_size: u64,
pub(super) range: std::ops::Range<u64>,
pub(super) next: u64,
pub(super) segment_size_blks: u64,
pub(super) task_pool: RebuildTasks,
pub(super) notify_fn: fn(String, String) -> (),
/// channel used to signal rebuild update
pub notify_chan: (Sender<RebuildState>, Receiver<RebuildState>),
/// current state of the rebuild job
pub(super) states: RebuildStates,
/// channel list which allows the await of the rebuild
pub(super) complete_chan: Vec<oneshot::Sender<RebuildState>>,
/// rebuild copy error, if any
pub error: Option<RebuildError>,
// Pre-opened descriptors for source/destination block device.
pub(super) src_descriptor: Box<dyn BlockDeviceDescriptor>,
pub(super) dst_descriptor: Box<dyn BlockDeviceDescriptor>,
}
// TODO: is `RebuildJob` really a Send type?
unsafe impl Send for RebuildJob {}
impl fmt::Debug for RebuildJob {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("RebuildJob")
.field("nexus", &self.nexus)
.field("source", &self.source)
.field("destination", &self.destination)
.finish()
}
}
/// rebuild statistics
pub struct RebuildStats {
/// total number of blocks to recover
pub blocks_total: u64,
/// number of blocks recovered
pub blocks_recovered: u64,
/// rebuild progress in %
pub progress: u64,
/// granularity of each recovery copy in blocks
pub segment_size_blks: u64,
/// size in bytes of each block
pub block_size: u64,
/// total number of concurrent rebuild tasks
pub tasks_total: u64,
/// number of current active tasks
pub tasks_active: u64,
}
/// Public facing operations on a Rebuild Job
pub trait ClientOperations {
/// Collects statistics from the job
fn stats(&self) -> RebuildStats;
/// Schedules the job to start in a future and returns a complete channel
/// which can be waited on
fn start(
&mut self,
) -> Result<oneshot::Receiver<RebuildState>, RebuildError>;
/// Stops the job which then triggers the completion hooks
fn stop(&mut self) -> Result<(), RebuildError>;
    /// Pauses the job, which can then be resumed later
    fn pause(&mut self) -> Result<(), RebuildError>;
    /// Resumes a previously paused job.
    /// This can be used to mitigate excess load on the source bdev, e.g.
    /// too much contention with frontend IO.
fn resume(&mut self) -> Result<(), RebuildError>;
/// Forcefully terminates the job, overriding any pending client operation
/// returns an async channel which can be used to await for termination
fn terminate(&mut self) -> oneshot::Receiver<RebuildState>;
}
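// Illustrative usage sketch (not part of the original source; `src_uri`,
// `dst_uri`, `num_blocks` and `notify` are hypothetical values):
//
// let job = RebuildJob::create("nexus0", src_uri, dst_uri, 0..num_blocks, notify)?;
// let complete = job.as_client().start()?;
// let final_state = complete.await?;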
impl RebuildJob {
/// Creates a new RebuildJob which rebuilds from source URI to target URI
/// from start to end (of the data partition); notify_fn callback is called
/// when the rebuild state is updated - with the nexus and destination
/// URI as arguments
pub fn create<'a>(
nexus: &str,
source: &str,
destination: &'a str,
range: std::ops::Range<u64>,
notify_fn: fn(String, String) -> (),
) -> Result<&'a mut Self, RebuildError> {
Self::new(nexus, source, destination, range, notify_fn)?.store()?;
Self::lookup(destination)
}
/// Lookup a rebuild job by its destination uri and return it
pub fn lookup(name: &str) -> Result<&mut Self, RebuildError> {
if let Some(job) = Self::get_instances().get_mut(name) {
Ok(job)
} else {
Err(RebuildError::JobNotFound {
job: name.to_owned(),
})
}
}
/// Lookup all rebuilds jobs with name as its source
pub fn lookup_src(name: &str) -> Vec<&mut Self> {
Self::get_instances()
.iter_mut()
.filter(|j| j.1.source == name)
.map(|j| j.1.as_mut())
.collect::<Vec<_>>()
}
/// Lookup a rebuild job by its destination uri then remove and return it
pub fn remove(name: &str) -> Result<Self, RebuildError> {
match Self::get_instances().remove(name) {
Some(job) => Ok(*job),
None => Err(RebuildError::JobNotFound {
job: name.to_owned(),
}),
}
}
/// Number of rebuild job instances
pub fn count() -> usize {
Self::get_instances().len()
}
/// State of the rebuild job
pub fn state(&self) -> RebuildState {
self.states.current
}
/// Error description
pub fn error_desc(&self) -> String {
match self.error.as_ref() {
Some(e) => e.verbose(),
_ => "".to_string(),
}
}
/// ClientOperations trait
/// todo: nexus should use this for all interaction with the job
pub fn as_client(&mut self) -> &mut impl ClientOperations {
self
}
}
impl RebuildState {
/// Final update for a rebuild job
pub fn done(self) -> bool {
matches!(self, Self::Stopped | Self::Failed | Self::Completed)
}
}
| 33.637363 | 79 | 0.62289 |
29fc2f98725c740b072f267dd4b13c2d90a8df50 | 19,957 | use std::env;
use std::fs::File;
use std::io::prelude::*;
use std::io::{self, BufReader};
use std::path::PathBuf;
use std::process::Command;
const IN: &str = "neon.spec";
const ARM_OUT: &str = "generated.rs";
const AARCH64_OUT: &str = "generated.rs";
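// The parser in `main` below expects `neon.spec` entries of roughly this
// shape (the concrete instruction names here are illustrative, not taken
// from the real spec file):
//
// /// Vector add
// name = vadd
// a = 1, 2, 3, 4
// b = 4, 3, 2, 1
// validate 5, 5, 5, 5
// arm = vadd
// aarch64 = add
// link-arm = vadd._EXT_
// link-aarch64 = add._EXT_
// generate int*_t, uint*_t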
const UINT_TYPES: [&str; 6] = [
"uint8x8_t",
"uint8x16_t",
"uint16x4_t",
"uint16x8_t",
"uint32x2_t",
"uint32x4_t",
];
const UINT_TYPES_64: [&str; 2] = ["uint64x1_t", "uint64x2_t"];
const INT_TYPES: [&str; 6] = [
"int8x8_t",
"int8x16_t",
"int16x4_t",
"int16x8_t",
"int32x2_t",
"int32x4_t",
];
const INT_TYPES_64: [&str; 2] = ["int64x1_t", "int64x2_t"];
const FLOAT_TYPES: [&str; 2] = [
//"float8x8_t", not supported by rust
//"float8x16_t", not supported by rust
//"float16x4_t", not supported by rust
//"float16x8_t", not supported by rust
"float32x2_t",
"float32x4_t",
];
const FLOAT_TYPES_64: [&str; 2] = [
//"float8x8_t", not supported by rust
//"float8x16_t", not supported by rust
//"float16x4_t", not supported by rust
//"float16x8_t", not supported by rust
"float64x1_t",
"float64x2_t",
];
fn type_len(t: &str) -> usize {
match t {
"int8x8_t" => 8,
"int8x16_t" => 16,
"int16x4_t" => 4,
"int16x8_t" => 8,
"int32x2_t" => 2,
"int32x4_t" => 4,
"int64x1_t" => 1,
"int64x2_t" => 2,
"uint8x8_t" => 8,
"uint8x16_t" => 16,
"uint16x4_t" => 4,
"uint16x8_t" => 8,
"uint32x2_t" => 2,
"uint32x4_t" => 4,
"uint64x1_t" => 1,
"uint64x2_t" => 2,
"float16x4_t" => 4,
"float16x8_t" => 8,
"float32x2_t" => 2,
"float32x4_t" => 4,
"float64x1_t" => 1,
"float64x2_t" => 2,
"poly64x1_t" => 1,
"poly64x2_t" => 2,
_ => panic!("unknown type: {}", t),
}
}
fn type_to_suffix(t: &str) -> &str {
match t {
"int8x8_t" => "_s8",
"int8x16_t" => "q_s8",
"int16x4_t" => "_s16",
"int16x8_t" => "q_s16",
"int32x2_t" => "_s32",
"int32x4_t" => "q_s32",
"int64x1_t" => "_s64",
"int64x2_t" => "q_s64",
"uint8x8_t" => "_u8",
"uint8x16_t" => "q_u8",
"uint16x4_t" => "_u16",
"uint16x8_t" => "q_u16",
"uint32x2_t" => "_u32",
"uint32x4_t" => "q_u32",
"uint64x1_t" => "_u64",
"uint64x2_t" => "q_u64",
"float16x4_t" => "_f16",
"float16x8_t" => "q_f16",
"float32x2_t" => "_f32",
"float32x4_t" => "q_f32",
"float64x1_t" => "_f64",
"float64x2_t" => "q_f64",
"poly64x1_t" => "_p64",
"poly64x2_t" => "q_p64",
_ => panic!("unknown type: {}", t),
}
}
fn type_to_global_type(t: &str) -> &str {
match t {
"int8x8_t" => "i8x8",
"int8x16_t" => "i8x16",
"int16x4_t" => "i16x4",
"int16x8_t" => "i16x8",
"int32x2_t" => "i32x2",
"int32x4_t" => "i32x4",
"int64x1_t" => "i64x1",
"int64x2_t" => "i64x2",
"uint8x8_t" => "u8x8",
"uint8x16_t" => "u8x16",
"uint16x4_t" => "u16x4",
"uint16x8_t" => "u16x8",
"uint32x2_t" => "u32x2",
"uint32x4_t" => "u32x4",
"uint64x1_t" => "u64x1",
"uint64x2_t" => "u64x2",
"float16x4_t" => "f16x4",
"float16x8_t" => "f16x8",
"float32x2_t" => "f32x2",
"float32x4_t" => "f32x4",
"float64x1_t" => "f64",
"float64x2_t" => "f64x2",
"poly64x1_t" => "i64x1",
"poly64x2_t" => "i64x2",
_ => panic!("unknown type: {}", t),
}
}
fn type_to_native_type(t: &str) -> &str {
match t {
"int8x8_t" => "i8",
"int8x16_t" => "i8",
"int16x4_t" => "i16",
"int16x8_t" => "i16",
"int32x2_t" => "i32",
"int32x4_t" => "i32",
"int64x1_t" => "i64",
"int64x2_t" => "i64",
"uint8x8_t" => "u8",
"uint8x16_t" => "u8",
"uint16x4_t" => "u16",
"uint16x8_t" => "u16",
"uint32x2_t" => "u32",
"uint32x4_t" => "u32",
"uint64x1_t" => "u64",
"uint64x2_t" => "u64",
"float16x4_t" => "f16",
"float16x8_t" => "f16",
"float32x2_t" => "f32",
"float32x4_t" => "f32",
"float64x1_t" => "f64",
"float64x2_t" => "f64",
"poly64x1_t" => "i64",
"poly64x2_t" => "i64",
_ => panic!("unknown type: {}", t),
}
}
fn type_to_ext(t: &str) -> &str {
match t {
"int8x8_t" => "v8i8",
"int8x16_t" => "v16i8",
"int16x4_t" => "v4i16",
"int16x8_t" => "v8i16",
"int32x2_t" => "v2i32",
"int32x4_t" => "v4i32",
"int64x1_t" => "v1i64",
"int64x2_t" => "v2i64",
"uint8x8_t" => "v8i8",
"uint8x16_t" => "v16i8",
"uint16x4_t" => "v4i16",
"uint16x8_t" => "v8i16",
"uint32x2_t" => "v2i32",
"uint32x4_t" => "v4i32",
"uint64x1_t" => "v1i64",
"uint64x2_t" => "v2i64",
"float16x4_t" => "v4f16",
"float16x8_t" => "v8f16",
"float32x2_t" => "v2f32",
"float32x4_t" => "v4f32",
"float64x1_t" => "v1f64",
"float64x2_t" => "v2f64",
/*
"poly64x1_t" => "i64x1",
"poly64x2_t" => "i64x2",
*/
_ => panic!("unknown type for extension: {}", t),
}
}
fn values(t: &str, vs: &[String]) -> String {
if vs.len() == 1 && !t.contains('x') {
format!(": {} = {}", t, vs[0])
} else {
format!(
" = {}::new([{}])",
t,
vs.iter()
.map(|v| map_val(type_to_global_type(t), v))
.map(|v| format!("{}{}", v, type_to_native_type(t)))
.collect::<Vec<String>>()
.join(", ")
)
}
}
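// For example, with `t = "int32x2_t"` and `vs = ["1", "2"]` this renders
// ` = int32x2_t::new([1i32, 2i32])`; placeholders such as "MAX" are first
// translated by `map_val` below. A lane-less scalar type renders `: i32 = 1`.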
fn max_val(t: &str) -> &'static str {
match &t[..3] {
"u8x" => "0xFF",
"u16" => "0xFF_FF",
"u32" => "0xFF_FF_FF_FF",
"u64" => "0xFF_FF_FF_FF_FF_FF_FF_FF",
"i8x" => "0x7F",
"i16" => "0x7F_FF",
"i32" => "0x7F_FF_FF_FF",
"i64" => "0x7F_FF_FF_FF_FF_FF_FF_FF",
"f32" => "3.40282347e+38",
"f64" => "1.7976931348623157e+308",
_ => panic!("No TRUE for type {}", t),
}
}
fn min_val(t: &str) -> &'static str {
match &t[..3] {
"u8x" => "0",
"u16" => "0",
"u32" => "0",
"u64" => "0",
"i8x" => "-128",
"i16" => "-32768",
"i32" => "-2147483648",
"i64" => "-9223372036854775808",
"f32" => "-3.40282347e+38",
"f64" => "-1.7976931348623157e+308",
_ => panic!("No TRUE for type {}", t),
}
}
fn true_val(t: &str) -> &'static str {
match &t[..3] {
"u8x" => "0xFF",
"u16" => "0xFF_FF",
"u32" => "0xFF_FF_FF_FF",
"u64" => "0xFF_FF_FF_FF_FF_FF_FF_FF",
_ => panic!("No TRUE for type {}", t),
}
}
fn ff_val(t: &str) -> &'static str {
match &t[..3] {
"u8x" => "0xFF",
"u16" => "0xFF_FF",
"u32" => "0xFF_FF_FF_FF",
"u64" => "0xFF_FF_FF_FF_FF_FF_FF_FF",
"i8x" => "0xFF",
"i16" => "0xFF_FF",
"i32" => "0xFF_FF_FF_FF",
"i64" => "0xFF_FF_FF_FF_FF_FF_FF_FF",
_ => panic!("No TRUE for type {}", t),
}
}
fn false_val(_t: &str) -> &'static str {
"0"
}
fn map_val<'v>(t: &str, v: &'v str) -> &'v str {
match v {
"FALSE" => false_val(t),
"TRUE" => true_val(t),
"MAX" => min_val(t),
"MIN" => max_val(t),
"FF" => ff_val(t),
o => o,
}
}
#[allow(clippy::too_many_arguments)]
fn gen_aarch64(
current_comment: &str,
current_fn: &Option<String>,
name: &str,
current_aarch64: &Option<String>,
link_aarch64: &Option<String>,
in_t: &str,
out_t: &str,
current_tests: &[(Vec<String>, Vec<String>, Vec<String>)],
) -> (String, String) {
let _global_t = type_to_global_type(in_t);
let _global_ret_t = type_to_global_type(out_t);
let current_fn = if let Some(current_fn) = current_fn.clone() {
if link_aarch64.is_some() {
panic!("[{}] Can't specify link and fn at the same time.", name)
}
current_fn
} else {
if link_aarch64.is_none() {
panic!("[{}] Either fn or link-aarch have to be specified.", name)
}
format!("{}_", name)
};
let current_aarch64 = current_aarch64.clone().unwrap();
let ext_c = if let Some(link_aarch64) = link_aarch64.clone() {
let ext = type_to_ext(in_t);
format!(
r#"
#[allow(improper_ctypes)]
extern "C" {{
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.{}")]
        fn {}(a: {}, b: {}) -> {};
}}
"#,
link_aarch64.replace("_EXT_", ext),
current_fn,
in_t,
in_t,
out_t
)
} else {
String::new()
};
let function = format!(
r#"
{}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr({}))]
pub unsafe fn {}(a: {}, b: {}) -> {} {{
{}{}(a, b)
}}
"#,
current_comment, current_aarch64, name, in_t, in_t, out_t, ext_c, current_fn,
);
let test = gen_test(name, &in_t, &out_t, current_tests, type_len(in_t));
(function, test)
}
fn gen_test(
name: &str,
in_t: &str,
out_t: &str,
current_tests: &[(Vec<String>, Vec<String>, Vec<String>)],
len: usize,
) -> String {
let mut test = format!(
r#"
// FIXME: #[simd_test(enable = "neon")]
#[test]
fn test_{}() {{unsafe {{
"#,
name,
);
for (a, b, e) in current_tests {
let a: Vec<String> = a.iter().take(len).cloned().collect();
let b: Vec<String> = b.iter().take(len).cloned().collect();
let e: Vec<String> = e.iter().take(len).cloned().collect();
let t = format!(
r#"
let a{};
let b{};
let e{};
let r: {} = transmute({}(transmute(a), transmute(b)));
assert!(cmp_arm(r, e));
"#,
values(in_t, &a),
values(in_t, &b),
values(out_t, &e),
out_t,
name
);
test.push_str(&t);
}
test.push_str(" }}\n");
test
}
#[allow(clippy::too_many_arguments)]
fn gen_arm(
current_comment: &str,
current_fn: &Option<String>,
name: &str,
current_arm: &str,
link_arm: &Option<String>,
current_aarch64: &Option<String>,
link_aarch64: &Option<String>,
in_t: &str,
out_t: &str,
current_tests: &[(Vec<String>, Vec<String>, Vec<String>)],
) -> (String, String) {
let _global_t = type_to_global_type(in_t);
let _global_ret_t = type_to_global_type(out_t);
let current_aarch64 = current_aarch64
.clone()
.unwrap_or_else(|| current_arm.to_string());
let current_fn = if let Some(current_fn) = current_fn.clone() {
if link_aarch64.is_some() || link_arm.is_some() {
panic!(
"[{}] Can't specify link and function at the same time. {} / {:?} / {:?}",
name, current_fn, link_aarch64, link_arm
)
}
current_fn
} else {
if link_aarch64.is_none() || link_arm.is_none() {
panic!(
"[{}] Either fn or link-arm and link-aarch have to be specified.",
name
)
}
format!("{}_", name)
};
let ext_c =
if let (Some(link_arm), Some(link_aarch64)) = (link_arm.clone(), link_aarch64.clone()) {
let ext = type_to_ext(in_t);
format!(
r#"#[allow(improper_ctypes)]
extern "C" {{
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.{}")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.{}")]
fn {}(a: {}, b: {}) -> {};
}}
"#,
link_arm.replace("_EXT_", ext),
link_aarch64.replace("_EXT_", ext),
current_fn,
in_t,
in_t,
out_t
)
} else {
String::new()
};
let function = format!(
r#"
{}
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr({}))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr({}))]
pub unsafe fn {}(a: {}, b: {}) -> {} {{
{}{}(a, b)
}}
"#,
current_comment, current_arm, current_aarch64, name, in_t, in_t, out_t, ext_c, current_fn,
);
let test = gen_test(name, &in_t, &out_t, current_tests, type_len(in_t));
(function, test)
}
fn main() -> io::Result<()> {
let f = File::open(IN).expect("Failed to open neon.spec");
let f = BufReader::new(f);
let mut current_comment = String::new();
let mut current_name: Option<String> = None;
let mut current_fn: Option<String> = None;
let mut current_arm: Option<String> = None;
let mut current_aarch64: Option<String> = None;
let mut link_arm: Option<String> = None;
let mut link_aarch64: Option<String> = None;
let mut a: Vec<String> = Vec::new();
let mut b: Vec<String> = Vec::new();
let mut current_tests: Vec<(Vec<String>, Vec<String>, Vec<String>)> = Vec::new();
//
    // THIS FILE IS GENERATED FROM neon.spec DO NOT CHANGE IT MANUALLY
//
let mut out_arm = String::from(
r#"
use crate::arm::*;
#[cfg(target_arch = "aarch64")]
use std::arch::aarch64::*;
#[cfg(target_arch = "aarch64")]
use crate::aarch64::*;
use crate::simd_llvm::*;
#[cfg(test)]
use assert_instr_macro::assert_instr;
"#,
);
let mut tests_arm = String::from(
r#"
#[cfg(test)]
#[allow(overflowing_literals)]
mod test {
#[cfg(target_arch = "aarch64")]
use std::arch::aarch64::*;
#[cfg(target_arch = "arm")]
use std::arch::arm::*;
#[cfg(target_arch = "arm")]
use crate::arm::*;
#[cfg(target_arch = "aarch64")]
use crate::aarch64::*;
use std::mem::transmute;
use simd_test_macro::simd_test;
use crate::cmparm::cmp_arm;
use crate::NeonInit;
"#,
);
//
    // THIS FILE IS GENERATED FROM neon.spec DO NOT CHANGE IT MANUALLY
//
let mut out_aarch64 = String::from(
r#"
use crate::aarch64::*;
use std::arch::aarch64::*;
use crate::simd_llvm::*;
#[cfg(test)]
use assert_instr_macro::assert_instr;
"#,
);
let mut tests_aarch64 = String::from(
r#"
#[cfg(test)]
mod test {
use std::arch::aarch64::*;
use crate::aarch64::*;
use std::mem::transmute;
use simd_test_macro::simd_test;
use crate::cmparm::cmp_arm;
use crate::NeonInit;
"#,
);
for line in f.lines() {
let line = line.unwrap();
if line.is_empty() {
continue;
}
if line.starts_with("/// ") {
current_comment = line;
current_name = None;
current_fn = None;
current_arm = None;
current_aarch64 = None;
link_aarch64 = None;
link_arm = None;
current_tests = Vec::new();
} else if line.starts_with("//") {
} else if line.starts_with("name = ") {
current_name = Some(String::from(&line[7..]));
} else if line.starts_with("fn = ") {
current_fn = Some(String::from(&line[5..]));
} else if line.starts_with("arm = ") {
current_arm = Some(String::from(&line[6..]));
} else if line.starts_with("aarch64 = ") {
current_aarch64 = Some(String::from(&line[10..]));
} else if line.starts_with("a = ") {
a = line[4..].split(',').map(|v| v.trim().to_string()).collect();
} else if line.starts_with("b = ") {
b = line[4..].split(',').map(|v| v.trim().to_string()).collect();
} else if line.starts_with("validate ") {
let e = line[9..].split(',').map(|v| v.trim().to_string()).collect();
current_tests.push((a.clone(), b.clone(), e));
} else if line.starts_with("link-aarch64 = ") {
link_aarch64 = Some(String::from(&line[15..]));
} else if line.starts_with("link-arm = ") {
link_arm = Some(String::from(&line[11..]));
} else if line.starts_with("generate ") {
let line = &line[9..];
let types: Vec<String> = line
.split(',')
.map(|v| v.trim().to_string())
.flat_map(|v| match v.as_str() {
"uint*_t" => UINT_TYPES.iter().map(|v| v.to_string()).collect(),
"uint64x*_t" => UINT_TYPES_64.iter().map(|v| v.to_string()).collect(),
"int*_t" => INT_TYPES.iter().map(|v| v.to_string()).collect(),
"int64x*_t" => INT_TYPES_64.iter().map(|v| v.to_string()).collect(),
"float*_t" => FLOAT_TYPES.iter().map(|v| v.to_string()).collect(),
"float64x*_t" => FLOAT_TYPES_64.iter().map(|v| v.to_string()).collect(),
_ => vec![v],
})
.collect();
for line in types {
let spec: Vec<&str> = line.split(':').map(|e| e.trim()).collect();
let in_t;
let out_t;
if spec.len() == 1 {
in_t = spec[0];
out_t = spec[0];
} else if spec.len() == 2 {
in_t = spec[0];
out_t = spec[1];
} else {
panic!("Bad spec: {}", line)
}
let current_name = current_name.clone().unwrap();
let name = format!("{}{}", current_name, type_to_suffix(in_t),);
if let Some(current_arm) = current_arm.clone() {
let (function, test) = gen_arm(
¤t_comment,
¤t_fn,
&name,
¤t_arm,
&link_arm,
¤t_aarch64,
&link_aarch64,
&in_t,
&out_t,
¤t_tests,
);
out_arm.push_str(&function);
tests_arm.push_str(&test);
} else {
let (function, test) = gen_aarch64(
¤t_comment,
¤t_fn,
&name,
¤t_aarch64,
&link_aarch64,
&in_t,
&out_t,
¤t_tests,
);
out_aarch64.push_str(&function);
tests_aarch64.push_str(&test);
}
}
}
}
tests_arm.push('}');
tests_arm.push('\n');
tests_aarch64.push('}');
tests_aarch64.push('\n');
let arm_out_path: PathBuf = PathBuf::from(env::var("OUT_DIR").unwrap())
.join("src")
.join("arm");
std::fs::create_dir_all(&arm_out_path)?;
let mut file_arm = File::create(arm_out_path.join(ARM_OUT))?;
file_arm.write_all(out_arm.as_bytes())?;
file_arm.write_all(tests_arm.as_bytes())?;
let aarch64_out_path: PathBuf = PathBuf::from(env::var("OUT_DIR").unwrap())
.join("src")
.join("aarch64");
std::fs::create_dir_all(&aarch64_out_path)?;
let mut file_aarch = File::create(aarch64_out_path.join(AARCH64_OUT))?;
file_aarch.write_all(out_aarch64.as_bytes())?;
file_aarch.write_all(tests_aarch64.as_bytes())?;
Command::new("rustfmt")
.arg(&arm_out_path)
.arg(&aarch64_out_path)
.status()
.expect("failed to execute process");
Ok(())
}
| 29.742176 | 98 | 0.489001 |
64256f42b28846d1ae57591ffde3ee5b5d9c0bdf | 1,528 | extern crate cairo;
extern crate gdk;
use controller::canvas::Canvas;
use gtk::DrawingArea;
use gtk::WidgetExt;
use cairo::Context;
use cairo::Surface;
use controller::color::RGBColor;
pub trait CanvasView {
    fn update(&mut self, canvas: &Canvas);
}
pub struct CairoView {
ctx: Context,
surface: Surface,
}
impl CanvasView for CairoView {
fn update(&mut self, canvas: &Canvas) {
let pixel_data = canvas.borrow_pixels();
for (i, row) in pixel_data.iter().enumerate() {
for (j, pixel) in row.iter().enumerate() {
self.draw_pixel(j, i, pixel);
}
}
}
}
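// Usage sketch (illustrative; `drawing_area` and `canvas` are assumed to
// exist in the caller):
//
// let mut view = CairoView::new(&drawing_area);
// view.update(&canvas);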
impl CairoView {
    /// Creates a new `CairoView` associated with the drawing area `da`.
pub fn new(da: &DrawingArea) -> CairoView {
let surface = gdk::Window::create_similar_surface(
&da.get_window().expect("Failed to get drawing surface window"),
cairo::Content::Color,
da.get_allocated_width(),
da.get_allocated_height())
.expect("Failed to create surface");
let ctx = Context::new(&surface);
ctx.set_antialias(cairo::Antialias::None);
CairoView {
ctx,
surface
}
}
fn draw_pixel(&self, x: usize, y: usize, color: &RGBColor) {
self.ctx.set_source(&color.as_cairo_pattern());
self.ctx.rectangle(x as f64, y as f64, 1., 1.);
self.ctx.fill();
}
} | 27.781818 | 80 | 0.587696 |
568f4fd1ab710be44d738a8224b27dfe9fbd3d32 | 1,243 | //! A stream of arbitrary audio.
//!
//! Audio streams are useful if you need to dynamically generate
//! audio. For instance, you can use an audio stream to synthesize
//! sound effects in real time or feed audio from a voice chat
//! into the mixer.
//!
//! If you just need to play an audio file, you should probably use
//! [instances](crate::instance).
use std::fmt::Debug;
use uuid::Uuid;
use crate::Frame;
/// Produces a constant flow of audio data in real time.
pub trait AudioStream: Debug + Send + 'static {
/// Produces the next sample.
///
/// The audio thread has to wait for this function to finish,
/// so it should process quickly and in a consistent amount
/// of time to avoid audio glitches, such as stuttering.
///
/// `dt` represents how many seconds have elapsed since the last request.
fn next(&mut self, dt: f64) -> Frame;
}
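/// A minimal sketch of an implementor: a sine-wave synthesizer. This type is
/// illustrative only (it is not part of the crate) and assumes the
/// `Frame::from_mono` constructor.
#[allow(dead_code)]
#[derive(Debug)]
struct SineStream {
    /// Oscillator phase in the range `0.0..1.0`.
    phase: f64,
    /// Tone frequency in Hz.
    frequency: f64,
}

impl AudioStream for SineStream {
    fn next(&mut self, dt: f64) -> Frame {
        // Advance the phase by the elapsed time and wrap it back into 0..1.
        self.phase = (self.phase + self.frequency * dt) % 1.0;
        let sample = (self.phase * 2.0 * std::f64::consts::PI).sin() as f32;
        Frame::from_mono(sample)
    }
}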
/// A unique identifier for an [`AudioStream`](crate::audio_stream::AudioStream).
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(
feature = "serde_support",
derive(serde::Serialize, serde::Deserialize),
serde(transparent)
)]
pub struct AudioStreamId {
uuid: Uuid,
}
impl AudioStreamId {
pub(crate) fn new() -> Self {
Self {
uuid: Uuid::new_v4(),
}
}
}
| 26.446809 | 81 | 0.692679 |
5021837765c55abbbe36b43d6f0d9a7d90f24f8d | 5,445 | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::DOEP3_TSIZ {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct XFERSIZER {
bits: u32,
}
impl XFERSIZER {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct PKTCNTR {
bits: u16,
}
impl PKTCNTR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u16 {
self.bits
}
}
#[doc = "Possible values of the field `RXDPIDSUPCNT`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RXDPIDSUPCNTR {
#[doc = "DATA0 PID."]
DATA0,
#[doc = "DATA2 PID / 1 Packet."]
DATA2,
#[doc = "DATA1 PID / 2 Packets."]
DATA1,
#[doc = "MDATA PID / 3 Packets."]
MDATA,
}
impl RXDPIDSUPCNTR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
match *self {
RXDPIDSUPCNTR::DATA0 => 0,
RXDPIDSUPCNTR::DATA2 => 0x01,
RXDPIDSUPCNTR::DATA1 => 0x02,
RXDPIDSUPCNTR::MDATA => 0x03,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: u8) -> RXDPIDSUPCNTR {
match value {
0 => RXDPIDSUPCNTR::DATA0,
1 => RXDPIDSUPCNTR::DATA2,
2 => RXDPIDSUPCNTR::DATA1,
3 => RXDPIDSUPCNTR::MDATA,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `DATA0`"]
#[inline]
pub fn is_data0(&self) -> bool {
*self == RXDPIDSUPCNTR::DATA0
}
#[doc = "Checks if the value of the field is `DATA2`"]
#[inline]
pub fn is_data2(&self) -> bool {
*self == RXDPIDSUPCNTR::DATA2
}
#[doc = "Checks if the value of the field is `DATA1`"]
#[inline]
pub fn is_data1(&self) -> bool {
*self == RXDPIDSUPCNTR::DATA1
}
#[doc = "Checks if the value of the field is `MDATA`"]
#[inline]
pub fn is_mdata(&self) -> bool {
*self == RXDPIDSUPCNTR::MDATA
}
}
#[doc = r" Proxy"]
pub struct _XFERSIZEW<'a> {
w: &'a mut W,
}
impl<'a> _XFERSIZEW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u32) -> &'a mut W {
const MASK: u32 = 0x0007_ffff;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _PKTCNTW<'a> {
w: &'a mut W,
}
impl<'a> _PKTCNTW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
const MASK: u16 = 0x03ff;
const OFFSET: u8 = 19;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:18 - Transfer Size"]
#[inline]
pub fn xfersize(&self) -> XFERSIZER {
let bits = {
const MASK: u32 = 0x0007_ffff;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u32
};
XFERSIZER { bits }
}
#[doc = "Bits 19:28 - Packet Count"]
#[inline]
pub fn pktcnt(&self) -> PKTCNTR {
let bits = {
const MASK: u16 = 0x03ff;
const OFFSET: u8 = 19;
((self.bits >> OFFSET) & MASK as u32) as u16
};
PKTCNTR { bits }
}
#[doc = "Bits 29:30 - Receive Data PID / SETUP Packet Count"]
#[inline]
pub fn rxdpidsupcnt(&self) -> RXDPIDSUPCNTR {
RXDPIDSUPCNTR::_from({
const MASK: u8 = 0x03;
const OFFSET: u8 = 29;
((self.bits >> OFFSET) & MASK as u32) as u8
})
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:18 - Transfer Size"]
#[inline]
pub fn xfersize(&mut self) -> _XFERSIZEW {
_XFERSIZEW { w: self }
}
#[doc = "Bits 19:28 - Packet Count"]
#[inline]
pub fn pktcnt(&mut self) -> _PKTCNTW {
_PKTCNTW { w: self }
}
}
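// Usage sketch (illustrative; `usb` stands for the owning peripheral
// instance, which is not shown in this file):
//
// usb.doep3_tsiz.modify(|_, w| unsafe { w.pktcnt().bits(1).xfersize().bits(64) });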
| 25.683962 | 65 | 0.510193 |
4b52f195ac07e5bd6291a5d9e47a6ba3fc67eb96 | 2,055 | use crate::config::EmptyConfig::EmptyConfig;
use crate::dsl::task_macro::*;
carp_task! {
// The task name. This is what will show up in the task graph
// and this is how you specify dependencies
name ExampleTask;
configuration EmptyConfig;
doc "An example task to help people learn how to write custom Carp tasks";
// The era your task operates on. Note: different eras have different block representations
era multiera;
// List of dependencies for this task. This is an array of names of other tasks
// Note: your task will run if all dependencies either ran successfully OR were skipped for this block
dependencies [];
// Specify which fields your task will have read-access to
read [multiera_txs];
// Specify which fields your task will have write-access to
write [multiera_addresses];
// Specify whether or not your task needs to run for a given block
// Note that by design, this function:
// 1) CANNOT access parent task state
// 2) Is NOT async
// 3) CANNOT save intermediate state
// (1) is because this function is called BEFORE any task is actually run to generate the actual execution plan for a block
// (2) is because this is meant to be a cheap optimization to skip tasks if they clearly aren't required
// Ex: if your task can be skipped if no txs exists in the block, if no metadata exists in the block, etc.
// (3) is because the cost of storing and passing around intermediate state would be more expensive than recomputing
should_add_task |_block, _properties| {
true
};
// Specify the function what your task actually does
// Your task has access to the full block data and any data you specified in either `read` or `write`
execute |_previous_data, task| handle_dummy(
task.db_tx,
task.block,
);
// Specify how to merge the result of your task back into the global state
merge_result |data, _result| {
};
}
async fn handle_dummy(
_db_tx: &DatabaseTransaction,
_block: BlockInfo<'_, alonzo::Block<'_>>,
) -> Result<(), DbErr> {
Ok(())
}
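// Illustrative variant (not part of the original source): a task that skips
// blocks without transactions could gate on the block body, e.g.
//
// should_add_task |block, _properties| {
//     !block.1.transaction_bodies.is_empty()
// };
//
// (the `transaction_bodies` field name is an assumption about the block type)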
| 42.8125 | 125 | 0.722141 |
23ba22aff5bce73e08d192bee314161c204ba482 | 4,282 | use super::{Graph, Marked};
use std::collections::{HashMap, VecDeque};
use std::hash::Hash;
pub struct Path<G: Graph> {
pub(super) stack: Vec<G::VertexKey>,
}
impl<G: Graph> Clone for Path<G> {
fn clone(&self) -> Self {
Self {
stack: self.stack.clone(),
}
}
}
impl<G: Graph> Default for Path<G> {
fn default() -> Self {
Self {
stack: Vec::default(),
}
}
}
impl<G: Graph> Path<G> {
pub(super) fn path_to(
edge_to: &HashMap<G::VertexKey, G::VertexKey>,
src: &G::VertexKey,
dst: &G::VertexKey,
) -> Self
where
G::VertexKey: Hash,
{
let mut stack = Vec::new();
let mut mid = dst.clone();
while mid != *src {
stack.push(mid.clone());
mid = edge_to.get(&mid).unwrap().clone();
}
stack.push(src.clone());
Path { stack }
}
}
impl<G: Graph> Iterator for Path<G> {
type Item = G::VertexKey;
fn next(&mut self) -> Option<Self::Item> {
self.stack.pop()
}
}
pub struct DepthFirstPaths<G: Graph> {
src: G::VertexKey,
pub(super) marked: HashMap<G::VertexKey, Marked>,
pub(super) edge_to: HashMap<G::VertexKey, G::VertexKey>,
}
impl<G: Graph> DepthFirstPaths<G>
where
G::VertexKey: Hash,
{
fn dfs(
graph: &G,
marked: &mut HashMap<G::VertexKey, Marked>,
edge_to: &mut HashMap<G::VertexKey, G::VertexKey>,
src: &G::VertexKey,
) {
marked.insert(src.clone(), Marked);
let adjs: Vec<_> = graph
.adj(src)
.map(|(to, _)| to)
.filter(|dst| marked.get(&dst).is_none())
.collect();
for dst in adjs {
if marked.get(&dst).is_none() {
edge_to.insert(dst.clone(), src.clone());
Self::dfs(graph, marked, edge_to, &dst)
}
}
}
pub fn has_path_to(&self, dst: &G::VertexKey) -> bool {
self.marked.get(dst).is_some()
}
pub fn path_to(&self, dst: &G::VertexKey) -> Path<G> {
if self.has_path_to(dst) {
Path::path_to(&self.edge_to, &self.src, dst)
} else {
Path::default()
}
}
pub fn uninit(src: &G::VertexKey) -> Self {
Self {
src: src.clone(),
marked: HashMap::new(),
edge_to: HashMap::new(),
}
}
pub fn new(graph: &G, src: &G::VertexKey) -> Self {
let mut marked = HashMap::new();
let mut edge_to = HashMap::new();
Self::dfs(graph, &mut marked, &mut edge_to, src);
Self {
src: src.clone(),
marked,
edge_to,
}
}
}
pub struct BreadthFirstPaths<G: Graph> {
src: G::VertexKey,
marked: HashMap<G::VertexKey, Marked>,
edge_to: HashMap<G::VertexKey, G::VertexKey>,
}
impl<G: Graph> BreadthFirstPaths<G>
where
G::VertexKey: Hash,
{
fn bfs(
graph: &G,
marked: &mut HashMap<G::VertexKey, Marked>,
edge_to: &mut HashMap<G::VertexKey, G::VertexKey>,
src: &G::VertexKey,
) {
let mut queue = VecDeque::new();
// visit src
marked.insert(src.clone(), Marked);
queue.push_back(src.clone());
while !queue.is_empty() {
let src = queue.pop_front().unwrap();
for (dst, _) in graph.adj(&src) {
if marked.get(&dst).is_none() {
edge_to.insert(dst.clone(), src.clone());
marked.insert(dst.clone(), Marked);
queue.push_back(dst);
}
}
}
}
pub fn has_path_to(&self, dst: &G::VertexKey) -> bool {
self.marked.get(dst).is_some()
}
pub fn path_to(&self, dst: &G::VertexKey) -> Path<G> {
if self.has_path_to(dst) {
Path::path_to(&self.edge_to, &self.src, dst)
} else {
Path::default()
}
}
pub fn new(graph: &G, src: &G::VertexKey) -> Self {
let mut marked = HashMap::new();
let mut edge_to = HashMap::new();
Self::bfs(graph, &mut marked, &mut edge_to, src);
Self {
src: src.clone(),
marked,
edge_to,
}
}
}
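// Usage sketch (illustrative; assumes a `Graph` implementation `g` with
// `usize` vertex keys):
//
// let bfs = BreadthFirstPaths::new(&g, &0);
// if bfs.has_path_to(&5) {
//     // Yields the vertices from source to destination, source first.
//     let path: Vec<usize> = bfs.path_to(&5).collect();
// }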
| 24.895349 | 61 | 0.499533 |
fe3a9b6b3dcdd7b18d414a97594cc1bfe19f10b7 | 20,690 | use rustc::middle::cstore::CrateStore;
use rustc::middle::privacy::AccessLevels;
use rustc::ty::{Ty, TyCtxt};
use rustc_ast::ast::CRATE_NODE_ID;
use rustc_attr as attr;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_driver::abort_on_err;
use rustc_errors::emitter::{Emitter, EmitterWriter};
use rustc_errors::json::JsonEmitter;
use rustc_feature::UnstableFeatures;
use rustc_hir::def::Namespace::TypeNS;
use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE};
use rustc_hir::HirId;
use rustc_interface::interface;
use rustc_resolve as resolve;
use rustc_session::config::ErrorOutputType;
use rustc_session::lint;
use rustc_session::DiagnosticOutput;
use rustc_session::{config, Session};
use rustc_span::source_map;
use rustc_span::symbol::sym;
use rustc_span::DUMMY_SP;
use rustc_data_structures::sync::{self, Lrc};
use std::cell::RefCell;
use std::mem;
use std::rc::Rc;
use crate::clean;
use crate::clean::{AttributesExt, MAX_DEF_ID};
use crate::config::{Options as RustdocOptions, RenderOptions};
use crate::html::render::RenderInfo;
use crate::passes::{self, Condition::*, ConditionalPass};
pub use rustc_session::config::{CodegenOptions, DebuggingOptions, Input, Options};
pub use rustc_session::search_paths::SearchPath;
pub type ExternalPaths = FxHashMap<DefId, (Vec<String>, clean::TypeKind)>;
pub struct DocContext<'tcx> {
pub tcx: TyCtxt<'tcx>,
pub resolver: Rc<RefCell<interface::BoxedResolver>>,
/// Later on moved into `html::render::CACHE_KEY`
pub renderinfo: RefCell<RenderInfo>,
/// Later on moved through `clean::Crate` into `html::render::CACHE_KEY`
pub external_traits: Rc<RefCell<FxHashMap<DefId, clean::Trait>>>,
/// Used while populating `external_traits` to ensure we don't process the same trait twice at
/// the same time.
pub active_extern_traits: RefCell<FxHashSet<DefId>>,
// The current set of type and lifetime substitutions,
// for expanding type aliases at the HIR level:
/// Table `DefId` of type parameter -> substituted type
pub ty_substs: RefCell<FxHashMap<DefId, clean::Type>>,
/// Table `DefId` of lifetime parameter -> substituted lifetime
pub lt_substs: RefCell<FxHashMap<DefId, clean::Lifetime>>,
/// Table `DefId` of const parameter -> substituted const
pub ct_substs: RefCell<FxHashMap<DefId, clean::Constant>>,
/// Table synthetic type parameter for `impl Trait` in argument position -> bounds
pub impl_trait_bounds: RefCell<FxHashMap<ImplTraitParam, Vec<clean::GenericBound>>>,
pub fake_def_ids: RefCell<FxHashMap<CrateNum, DefId>>,
pub all_fake_def_ids: RefCell<FxHashSet<DefId>>,
/// Auto-trait or blanket impls processed so far, as `(self_ty, trait_def_id)`.
// FIXME(eddyb) make this a `ty::TraitRef<'tcx>` set.
pub generated_synthetics: RefCell<FxHashSet<(Ty<'tcx>, DefId)>>,
pub auto_traits: Vec<DefId>,
}
impl<'tcx> DocContext<'tcx> {
pub fn sess(&self) -> &Session {
&self.tcx.sess
}
pub fn enter_resolver<F, R>(&self, f: F) -> R
where
F: FnOnce(&mut resolve::Resolver<'_>) -> R,
{
self.resolver.borrow_mut().access(f)
}
/// Call the closure with the given parameters set as
/// the substitutions for a type alias' RHS.
pub fn enter_alias<F, R>(
&self,
ty_substs: FxHashMap<DefId, clean::Type>,
lt_substs: FxHashMap<DefId, clean::Lifetime>,
ct_substs: FxHashMap<DefId, clean::Constant>,
f: F,
) -> R
where
F: FnOnce() -> R,
{
let (old_tys, old_lts, old_cts) = (
mem::replace(&mut *self.ty_substs.borrow_mut(), ty_substs),
mem::replace(&mut *self.lt_substs.borrow_mut(), lt_substs),
mem::replace(&mut *self.ct_substs.borrow_mut(), ct_substs),
);
let r = f();
*self.ty_substs.borrow_mut() = old_tys;
*self.lt_substs.borrow_mut() = old_lts;
*self.ct_substs.borrow_mut() = old_cts;
r
}
// This is an ugly hack, but it's the simplest way to handle synthetic impls without greatly
// refactoring either librustdoc or librustc. In particular, allowing new DefIds to be
// registered after the AST is constructed would require storing the defid mapping in a
// RefCell, decreasing the performance for normal compilation for very little gain.
//
// Instead, we construct 'fake' def ids, which start immediately after the last DefId.
// In the Debug impl for clean::Item, we explicitly check for fake
// def ids, as we'll end up with a panic if we use the DefId Debug impl for fake DefIds
pub fn next_def_id(&self, crate_num: CrateNum) -> DefId {
let start_def_id = {
let next_id = if crate_num == LOCAL_CRATE {
self.tcx.hir().definitions().def_path_table().next_id()
} else {
self.enter_resolver(|r| r.cstore().def_path_table(crate_num).next_id())
};
DefId { krate: crate_num, index: next_id }
};
let mut fake_ids = self.fake_def_ids.borrow_mut();
let def_id = *fake_ids.entry(crate_num).or_insert(start_def_id);
fake_ids.insert(
crate_num,
DefId { krate: crate_num, index: DefIndex::from(def_id.index.index() + 1) },
);
MAX_DEF_ID.with(|m| {
m.borrow_mut().entry(def_id.krate.clone()).or_insert(start_def_id);
});
self.all_fake_def_ids.borrow_mut().insert(def_id);
def_id
}
/// Like the function of the same name on the HIR map, but skips calling it on fake DefIds.
/// (This avoids a slice-index-out-of-bounds panic.)
pub fn as_local_hir_id(&self, def_id: DefId) -> Option<HirId> {
if self.all_fake_def_ids.borrow().contains(&def_id) {
None
} else {
self.tcx.hir().as_local_hir_id(def_id)
}
}
pub fn stability(&self, id: HirId) -> Option<attr::Stability> {
self.tcx
.hir()
.opt_local_def_id(id)
.and_then(|def_id| self.tcx.lookup_stability(def_id))
.cloned()
}
pub fn deprecation(&self, id: HirId) -> Option<attr::Deprecation> {
self.tcx.hir().opt_local_def_id(id).and_then(|def_id| self.tcx.lookup_deprecation(def_id))
}
}
/// Creates a new diagnostic `Handler` that can be used to emit warnings and errors.
///
/// If the given `error_format` is `ErrorOutputType::Json` and no `SourceMap` is given, a new one
/// will be created for the handler.
pub fn new_handler(
error_format: ErrorOutputType,
source_map: Option<Lrc<source_map::SourceMap>>,
debugging_opts: &DebuggingOptions,
) -> rustc_errors::Handler {
let emitter: Box<dyn Emitter + sync::Send> = match error_format {
ErrorOutputType::HumanReadable(kind) => {
let (short, color_config) = kind.unzip();
Box::new(
EmitterWriter::stderr(
color_config,
source_map.map(|sm| sm as _),
short,
debugging_opts.teach,
debugging_opts.terminal_width,
false,
)
.ui_testing(debugging_opts.ui_testing()),
)
}
ErrorOutputType::Json { pretty, json_rendered } => {
let source_map = source_map.unwrap_or_else(|| {
Lrc::new(source_map::SourceMap::new(source_map::FilePathMapping::empty()))
});
Box::new(
JsonEmitter::stderr(None, source_map, pretty, json_rendered, false)
.ui_testing(debugging_opts.ui_testing()),
)
}
};
rustc_errors::Handler::with_emitter_and_flags(
emitter,
debugging_opts.diagnostic_handler_flags(true),
)
}
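// Usage sketch (illustrative): building a handler for the default
// human-readable output without an explicit source map. Whether
// `ErrorOutputType` implements `Default` is an assumption here.
//
// let handler = new_handler(ErrorOutputType::default(), None, &debugging_opts);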
pub fn run_core(options: RustdocOptions) -> (clean::Crate, RenderInfo, RenderOptions) {
// Parse, resolve, and typecheck the given crate.
let RustdocOptions {
input,
crate_name,
proc_macro_crate,
error_format,
libs,
externs,
mut cfgs,
codegen_options,
debugging_options,
target,
edition,
maybe_sysroot,
lint_opts,
describe_lints,
lint_cap,
mut default_passes,
mut document_private,
document_hidden,
mut manual_passes,
display_warnings,
render_options,
output_format,
..
} = options;
let extern_names: Vec<String> = externs
.iter()
.filter(|(_, entry)| entry.add_prelude)
.map(|(name, _)| name)
.cloned()
.collect();
// Add the doc cfg into the doc build.
cfgs.push("doc".to_string());
let cpath = Some(input.clone());
let input = Input::File(input);
let intra_link_resolution_failure_name = lint::builtin::INTRA_DOC_LINK_RESOLUTION_FAILURE.name;
let warnings_lint_name = lint::builtin::WARNINGS.name;
let missing_docs = rustc_lint::builtin::MISSING_DOCS.name;
let missing_doc_example = rustc_lint::builtin::MISSING_DOC_CODE_EXAMPLES.name;
let private_doc_tests = rustc_lint::builtin::PRIVATE_DOC_TESTS.name;
let no_crate_level_docs = rustc_lint::builtin::MISSING_CRATE_LEVEL_DOCS.name;
// In addition to those specific lints, we also need to whitelist those given through
// command line, otherwise they'll get ignored and we don't want that.
let mut whitelisted_lints = vec![
warnings_lint_name.to_owned(),
intra_link_resolution_failure_name.to_owned(),
missing_docs.to_owned(),
missing_doc_example.to_owned(),
private_doc_tests.to_owned(),
no_crate_level_docs.to_owned(),
];
whitelisted_lints.extend(lint_opts.iter().map(|(lint, _)| lint).cloned());
let lints = || {
lint::builtin::HardwiredLints::get_lints()
.into_iter()
.chain(rustc_lint::SoftLints::get_lints().into_iter())
};
let lint_opts = lints()
.filter_map(|lint| {
if lint.name == warnings_lint_name || lint.name == intra_link_resolution_failure_name {
None
} else {
Some((lint.name_lower(), lint::Allow))
}
})
.chain(lint_opts.into_iter())
.collect::<Vec<_>>();
let lint_caps = lints()
.filter_map(|lint| {
            // We don't want to whitelist *all* lints, so let's
            // ignore the rest.
if whitelisted_lints.iter().any(|l| lint.name == l) {
None
} else {
Some((lint::LintId::of(lint), lint::Allow))
}
})
.collect();
let crate_types = if proc_macro_crate {
vec![config::CrateType::ProcMacro]
} else {
vec![config::CrateType::Rlib]
};
// plays with error output here!
let sessopts = config::Options {
maybe_sysroot,
search_paths: libs,
crate_types,
lint_opts: if !display_warnings { lint_opts } else { vec![] },
lint_cap: Some(lint_cap.unwrap_or_else(|| lint::Forbid)),
cg: codegen_options,
externs,
target_triple: target,
unstable_features: UnstableFeatures::from_environment(),
actually_rustdoc: true,
debugging_opts: debugging_options,
error_format,
edition,
describe_lints,
..Options::default()
};
let config = interface::Config {
opts: sessopts,
crate_cfg: interface::parse_cfgspecs(cfgs),
input,
input_path: cpath,
output_file: None,
output_dir: None,
file_loader: None,
diagnostic_output: DiagnosticOutput::Default,
stderr: None,
crate_name,
lint_caps,
register_lints: None,
override_queries: None,
registry: rustc_driver::diagnostics_registry(),
};
interface::run_compiler_in_existing_thread_pool(config, |compiler| {
compiler.enter(|queries| {
let sess = compiler.session();
// We need to hold on to the complete resolver, so we cause everything to be
// cloned for the analysis passes to use. Suboptimal, but necessary in the
// current architecture.
let resolver = {
let parts = abort_on_err(queries.expansion(), sess).peek();
let resolver = parts.1.borrow();
// Before we actually clone it, let's force all the extern'd crates to
// actually be loaded, just in case they're only referred to inside
// intra-doc-links
resolver.borrow_mut().access(|resolver| {
for extern_name in &extern_names {
resolver
.resolve_str_path_error(DUMMY_SP, extern_name, TypeNS, CRATE_NODE_ID)
.unwrap_or_else(|()| {
panic!("Unable to resolve external crate {}", extern_name)
});
}
});
// Now we're good to clone the resolver because everything should be loaded
resolver.clone()
};
if sess.has_errors() {
sess.fatal("Compilation failed, aborting rustdoc");
}
let mut global_ctxt = abort_on_err(queries.global_ctxt(), sess).take();
global_ctxt.enter(|tcx| {
tcx.analysis(LOCAL_CRATE).ok();
// Abort if there were any errors so far
sess.abort_if_errors();
let access_levels = tcx.privacy_access_levels(LOCAL_CRATE);
// Convert from a HirId set to a DefId set since we don't always have easy access
// to the map from defid -> hirid
let access_levels = AccessLevels {
map: access_levels
.map
.iter()
.map(|(&k, &v)| (tcx.hir().local_def_id(k), v))
.collect(),
};
let mut renderinfo = RenderInfo::default();
renderinfo.access_levels = access_levels;
renderinfo.output_format = output_format;
let mut ctxt = DocContext {
tcx,
resolver,
external_traits: Default::default(),
active_extern_traits: Default::default(),
renderinfo: RefCell::new(renderinfo),
ty_substs: Default::default(),
lt_substs: Default::default(),
ct_substs: Default::default(),
impl_trait_bounds: Default::default(),
fake_def_ids: Default::default(),
all_fake_def_ids: Default::default(),
generated_synthetics: Default::default(),
auto_traits: tcx
.all_traits(LOCAL_CRATE)
.iter()
.cloned()
.filter(|trait_def_id| tcx.trait_is_auto(*trait_def_id))
.collect(),
};
debug!("crate: {:?}", tcx.hir().krate());
let mut krate = clean::krate(&mut ctxt);
if let Some(ref m) = krate.module {
if let None | Some("") = m.doc_value() {
let help = "The following guide may be of use:\n\
https://doc.rust-lang.org/nightly/rustdoc/how-to-write-documentation\
.html";
tcx.struct_lint_node(
rustc_lint::builtin::MISSING_CRATE_LEVEL_DOCS,
ctxt.as_local_hir_id(m.def_id).unwrap(),
|lint| {
let mut diag = lint.build(
"no documentation found for this crate's top-level module",
);
diag.help(help);
diag.emit();
},
);
}
}
fn report_deprecated_attr(name: &str, diag: &rustc_errors::Handler) {
let mut msg = diag.struct_warn(&format!(
"the `#![doc({})]` attribute is considered deprecated",
name
));
msg.warn(
"see issue #44136 <https://github.com/rust-lang/rust/issues/44136> \
for more information",
);
if name == "no_default_passes" {
msg.help("you may want to use `#![doc(document_private_items)]`");
}
msg.emit();
}
// Process all of the crate attributes, extracting plugin metadata along
// with the passes which we are supposed to run.
for attr in krate.module.as_ref().unwrap().attrs.lists(sym::doc) {
let diag = ctxt.sess().diagnostic();
let name = attr.name_or_empty();
if attr.is_word() {
if name == sym::no_default_passes {
report_deprecated_attr("no_default_passes", diag);
if default_passes == passes::DefaultPassOption::Default {
default_passes = passes::DefaultPassOption::None;
}
}
} else if let Some(value) = attr.value_str() {
let sink = match name {
sym::passes => {
report_deprecated_attr("passes = \"...\"", diag);
&mut manual_passes
}
sym::plugins => {
report_deprecated_attr("plugins = \"...\"", diag);
eprintln!(
"WARNING: `#![doc(plugins = \"...\")]` \
no longer functions; see CVE-2018-1000622"
);
continue;
}
_ => continue,
};
for name in value.as_str().split_whitespace() {
sink.push(name.to_string());
}
}
if attr.is_word() && name == sym::document_private_items {
document_private = true;
}
}
let passes = passes::defaults(default_passes).iter().copied().chain(
manual_passes.into_iter().flat_map(|name| {
if let Some(pass) = passes::find_pass(&name) {
Some(ConditionalPass::always(pass))
} else {
error!("unknown pass {}, skipping", name);
None
}
}),
);
info!("Executing passes");
for p in passes {
let run = match p.condition {
Always => true,
WhenDocumentPrivate => document_private,
WhenNotDocumentPrivate => !document_private,
WhenNotDocumentHidden => !document_hidden,
};
if run {
debug!("running pass {}", p.pass.name);
krate = (p.pass.run)(krate, &ctxt);
}
}
ctxt.sess().abort_if_errors();
(krate, ctxt.renderinfo.into_inner(), render_options)
})
})
})
}
/// `DefId` or parameter index (`ty::ParamTy.index`) of a synthetic type parameter
/// for `impl Trait` in argument position.
#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum ImplTraitParam {
DefId(DefId),
ParamIndex(u32),
}
impl From<DefId> for ImplTraitParam {
fn from(did: DefId) -> Self {
ImplTraitParam::DefId(did)
}
}
impl From<u32> for ImplTraitParam {
fn from(idx: u32) -> Self {
ImplTraitParam::ParamIndex(idx)
}
}
| 38.033088 | 99 | 0.545771 |
481fd5fcc61ef44b0781ad852bf7a15b2e6002f0 | 12,481 | use crate::envelope::Envelope;
use crate::node::{envelope_for_children, ParentNode, RTreeNode};
use crate::object::RTreeObject;
use crate::params::{InsertionStrategy, RTreeParams};
use crate::point::{Point, PointExt};
use crate::rtree::RTree;
use alloc::vec::Vec;
use num_traits::{Bounded, Zero};
/// Inserts points according to the r-star heuristic.
///
/// The r*-heuristic focusses on good insertion quality at the costs of
/// insertion performance. This strategy is best for use cases with few
/// insertions and many nearest neighbor queries.
///
/// `RStarInsertionStrategy` is used as the default insertion strategy.
/// See [InsertionStrategy] for more information on insertion strategies.
pub enum RStarInsertionStrategy {}
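// Illustrative parameter set (not part of this file) selecting the r*
// strategy explicitly; the constant values are arbitrary examples:
//
// struct LargeNodeParams;
// impl RTreeParams for LargeNodeParams {
//     const MIN_SIZE: usize = 10;
//     const MAX_SIZE: usize = 30;
//     const REINSERTION_COUNT: usize = 5;
//     type DefaultInsertionStrategy = RStarInsertionStrategy;
// }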
enum InsertionResult<T>
where
T: RTreeObject,
{
Split(RTreeNode<T>),
Reinsert(Vec<RTreeNode<T>>, usize),
Complete,
}
impl InsertionStrategy for RStarInsertionStrategy {
fn insert<T, Params>(tree: &mut RTree<T, Params>, t: T)
where
Params: RTreeParams,
T: RTreeObject,
{
use InsertionAction::*;
enum InsertionAction<T: RTreeObject> {
PerformSplit(RTreeNode<T>),
PerformReinsert(RTreeNode<T>),
}
let first = recursive_insert::<_, Params>(tree.root_mut(), RTreeNode::Leaf(t), 0);
let mut target_height = 0;
let mut insertion_stack = Vec::new();
match first {
InsertionResult::Split(node) => insertion_stack.push(PerformSplit(node)),
InsertionResult::Reinsert(nodes_to_reinsert, real_target_height) => {
insertion_stack.extend(nodes_to_reinsert.into_iter().map(PerformReinsert));
target_height = real_target_height;
}
InsertionResult::Complete => {}
};
while let Some(next) = insertion_stack.pop() {
match next {
PerformSplit(node) => {
// The root node was split, create a new root and increase height
let new_root = ParentNode::new_root::<Params>();
let old_root = ::core::mem::replace(tree.root_mut(), new_root);
let new_envelope = old_root.envelope.merged(&node.envelope());
let root = tree.root_mut();
root.envelope = new_envelope;
root.children.push(RTreeNode::Parent(old_root));
root.children.push(node);
target_height += 1;
}
PerformReinsert(node_to_reinsert) => {
let root = tree.root_mut();
match forced_insertion::<T, Params>(root, node_to_reinsert, target_height) {
InsertionResult::Split(node) => insertion_stack.push(PerformSplit(node)),
InsertionResult::Reinsert(_, _) => {
panic!("Unexpected reinsert. This is a bug in rstar.")
}
InsertionResult::Complete => {}
}
}
}
}
}
}
fn forced_insertion<T, Params>(
node: &mut ParentNode<T>,
t: RTreeNode<T>,
target_height: usize,
) -> InsertionResult<T>
where
T: RTreeObject,
Params: RTreeParams,
{
node.envelope.merge(&t.envelope());
let expand_index = choose_subtree(node, &t);
if target_height == 0 || node.children.len() < expand_index {
// Force insertion into this node
node.children.push(t);
return resolve_overflow_without_reinsertion::<_, Params>(node);
}
if let RTreeNode::Parent(ref mut follow) = node.children[expand_index] {
match forced_insertion::<_, Params>(follow, t, target_height - 1) {
InsertionResult::Split(child) => {
node.envelope.merge(&child.envelope());
node.children.push(child);
resolve_overflow_without_reinsertion::<_, Params>(node)
}
other => other,
}
} else {
unreachable!("This is a bug in rstar.")
}
}
fn recursive_insert<T, Params>(
node: &mut ParentNode<T>,
t: RTreeNode<T>,
current_height: usize,
) -> InsertionResult<T>
where
T: RTreeObject,
Params: RTreeParams,
{
node.envelope.merge(&t.envelope());
let expand_index = choose_subtree(node, &t);
if node.children.len() < expand_index {
// Force insertion into this node
node.children.push(t);
return resolve_overflow::<_, Params>(node, current_height);
}
let expand = if let RTreeNode::Parent(ref mut follow) = node.children[expand_index] {
recursive_insert::<_, Params>(follow, t, current_height + 1)
} else {
panic!("This is a bug in rstar.")
};
match expand {
InsertionResult::Split(child) => {
node.envelope.merge(&child.envelope());
node.children.push(child);
resolve_overflow::<_, Params>(node, current_height)
}
InsertionResult::Reinsert(a, b) => {
node.envelope = envelope_for_children(&node.children);
InsertionResult::Reinsert(a, b)
}
other => other,
}
}
fn choose_subtree<T>(node: &mut ParentNode<T>, to_insert: &RTreeNode<T>) -> usize
where
T: RTreeObject,
{
let all_leaves = match node.children.first() {
Some(RTreeNode::Leaf(_)) => return usize::max_value(),
Some(RTreeNode::Parent(ref data)) => data
.children
.first()
.map(RTreeNode::is_leaf)
.unwrap_or(true),
_ => return usize::max_value(),
};
let zero: <<T::Envelope as Envelope>::Point as Point>::Scalar = Zero::zero();
let insertion_envelope = to_insert.envelope();
let mut inclusion_count = 0;
let mut min_area = <<T::Envelope as Envelope>::Point as Point>::Scalar::max_value();
let mut min_index = 0;
for (index, child) in node.children.iter().enumerate() {
let envelope = child.envelope();
if envelope.contains_envelope(&insertion_envelope) {
inclusion_count += 1;
let area = envelope.area();
if area < min_area {
min_area = area;
min_index = index;
}
}
}
if inclusion_count == 0 {
// No inclusion found, subtree depends on overlap and area increase
let mut min = (zero, zero, zero);
for (index, child1) in node.children.iter().enumerate() {
let envelope = child1.envelope();
let mut new_envelope = envelope;
new_envelope.merge(&insertion_envelope);
let overlap_increase = if all_leaves {
// Calculate minimal overlap increase
let mut overlap = zero;
let mut new_overlap = zero;
for child2 in &node.children {
if child1 as *const _ != child2 as *const _ {
let child_envelope = child2.envelope();
let temp1 = envelope.intersection_area(&child_envelope);
overlap = overlap + temp1;
let temp2 = new_envelope.intersection_area(&child_envelope);
new_overlap = new_overlap + temp2;
}
}
new_overlap - overlap
} else {
// Don't calculate overlap increase if not all children are leaves
zero
};
// Calculate area increase and area
let area = new_envelope.area();
let area_increase = area - envelope.area();
let new_min = (overlap_increase, area_increase, area);
if new_min < min || index == 0 {
min = new_min;
min_index = index;
}
}
}
min_index
}
// Never returns a request for reinsertion
fn resolve_overflow_without_reinsertion<T, Params>(node: &mut ParentNode<T>) -> InsertionResult<T>
where
T: RTreeObject,
Params: RTreeParams,
{
if node.children.len() > Params::MAX_SIZE {
let off_split = split::<_, Params>(node);
InsertionResult::Split(off_split)
} else {
InsertionResult::Complete
}
}
fn resolve_overflow<T, Params>(node: &mut ParentNode<T>, current_depth: usize) -> InsertionResult<T>
where
T: RTreeObject,
Params: RTreeParams,
{
if Params::REINSERTION_COUNT == 0 {
resolve_overflow_without_reinsertion::<_, Params>(node)
} else if node.children.len() > Params::MAX_SIZE {
let nodes_for_reinsertion = get_nodes_for_reinsertion::<_, Params>(node);
InsertionResult::Reinsert(nodes_for_reinsertion, current_depth)
} else {
InsertionResult::Complete
}
}
fn split<T, Params>(node: &mut ParentNode<T>) -> RTreeNode<T>
where
T: RTreeObject,
Params: RTreeParams,
{
let axis = get_split_axis::<_, Params>(node);
let zero = <<T::Envelope as Envelope>::Point as Point>::Scalar::zero();
debug_assert!(node.children.len() >= 2);
// Sort along axis
T::Envelope::sort_envelopes(axis, &mut node.children);
let mut best = (zero, zero);
let min_size = Params::MIN_SIZE;
let mut best_index = min_size;
for k in min_size..=node.children.len() - min_size {
let mut first_envelope = node.children[k - 1].envelope();
let mut second_envelope = node.children[k].envelope();
let (l, r) = node.children.split_at(k);
for child in l {
first_envelope.merge(&child.envelope());
}
for child in r {
second_envelope.merge(&child.envelope());
}
let overlap_value = first_envelope.intersection_area(&second_envelope);
let area_value = first_envelope.area() + second_envelope.area();
let new_best = (overlap_value, area_value);
if new_best < best || k == min_size {
best = new_best;
best_index = k;
}
}
let off_split = node.children.split_off(best_index);
node.envelope = envelope_for_children(&node.children);
RTreeNode::Parent(ParentNode::new_parent(off_split))
}
fn get_split_axis<T, Params>(node: &mut ParentNode<T>) -> usize
where
T: RTreeObject,
Params: RTreeParams,
{
let mut best_goodness = <<T::Envelope as Envelope>::Point as Point>::Scalar::max_value();
let mut best_axis = 0;
let min_size = Params::MIN_SIZE;
let until = node.children.len() - min_size + 1;
for axis in 0..<T::Envelope as Envelope>::Point::DIMENSIONS {
// Sort children along the current axis
T::Envelope::sort_envelopes(axis, &mut node.children);
let mut first_envelope = T::Envelope::new_empty();
let mut second_envelope = T::Envelope::new_empty();
for child in &node.children[..min_size] {
first_envelope.merge(&child.envelope());
}
for child in &node.children[until..] {
second_envelope.merge(&child.envelope());
}
for k in min_size..until {
let mut first_modified = first_envelope;
let mut second_modified = second_envelope;
let (l, r) = node.children.split_at(k);
for child in l {
first_modified.merge(&child.envelope());
}
for child in r {
second_modified.merge(&child.envelope());
}
let perimeter_value =
first_modified.perimeter_value() + second_modified.perimeter_value();
if best_goodness > perimeter_value {
best_axis = axis;
best_goodness = perimeter_value;
}
}
}
best_axis
}
fn get_nodes_for_reinsertion<T, Params>(node: &mut ParentNode<T>) -> Vec<RTreeNode<T>>
where
T: RTreeObject,
Params: RTreeParams,
{
let center = node.envelope.center();
    // Sort by increasing distance from the center so Vec::split_off returns the farthest children
node.children.sort_by(|l, r| {
let l_center = l.envelope().center();
let r_center = r.envelope().center();
l_center
.sub(¢er)
.length_2()
.partial_cmp(&(r_center.sub(¢er)).length_2())
.unwrap()
});
let num_children = node.children.len();
let result = node
.children
.split_off(num_children - Params::REINSERTION_COUNT);
node.envelope = envelope_for_children(&node.children);
result
}
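// Hedged aside (added for illustration; not part of the original crate):
// `Vec::split_off(k)` keeps the first `k` elements in place and returns the
// tail; this is why the children are sorted by increasing distance first,
// so the `REINSERTION_COUNT` farthest children end up in the returned tail.
#[cfg(test)]
#[test]
fn split_off_keeps_head_and_returns_tail_sketch() {
    let mut v = vec![1, 2, 3, 4, 5];
    let tail = v.split_off(3);
    assert_eq!(v, vec![1, 2, 3]);
    assert_eq!(tail, vec![4, 5]);
}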
| 35.257062 | 100 | 0.588414 |
e4165c0b356e7afc849462601fa959701f212326 | 1,041 | use swc_ecma_parser::{EsConfig, Syntax};
use swc_ecma_transforms_proposal::import_assertions;
use swc_ecma_transforms_testing::test;
use swc_ecma_visit::Fold;
fn tr() -> impl Fold {
import_assertions()
}
fn syntax() -> Syntax {
Syntax::Es(EsConfig {
import_assertions: true,
..Default::default()
})
}
test!(
syntax(),
|_| tr(),
import_with_assertions,
r#"import test from "./test.json" assert {type: "json"};"#,
r#"import test from "./test.json";"#
);
test!(
syntax(),
|_| tr(),
side_effect_import_with_assertions,
r#"import "./test.json" assert {type: "json"};"#,
r#"import "./test.json";"#
);
test!(
syntax(),
|_| tr(),
named_export_with_assertions,
r#"export {default as test} from "./test.json" assert {type: "json"};"#,
r#"export {default as test} from "./test.json";"#
);
test!(
syntax(),
|_| tr(),
export_all_with_assertions,
r#"export * from "./test.json" assert {type: "json"};"#,
r#"export * from "./test.json";"#
);
| 21.6875 | 76 | 0.600384 |
b91831a42c26aca5d07d306157a46950997b26b2 | 751 | // LNP/BP client-side-validation foundation libraries implementing LNPBP
// specifications & standards (LNPBP-4, 7, 8, 9, 42, 81)
//
// Written in 2019-2021 by
// Dr. Maxim Orlovsky <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the Apache 2.0 License along with this
// software. If not, see <https://opensource.org/licenses/Apache-2.0>.
#[macro_use]
extern crate strict_encoding_derive;
#[derive(NetworkEncode, NetworkDecode)]
#[network_encoding(use_tlv)]
enum Tlv {
CaseOne,
CaseTwo,
}
| 31.291667 | 76 | 0.739015 |
08bbd9bae863cf8cba61f4bb5c3357b11eebe492 | 896 | #![feature(plugin_registrar, rustc_private, trace_macros, convert, plugin)]
#![allow(unused_imports)]
#![allow(dead_code)]
extern crate syntax;
extern crate rustc;
use rustc::plugin::Registry;
use syntax::ext::base::{SyntaxExtension};
use syntax::parse::token;
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
let nm = token::intern("model");
let ext = SyntaxExtension::MultiDecorator(Box::new(expand_model));
reg.register_syntax_extension(nm, ext);
reg.register_macro("default_attrs", expand_default_attrs);
}
//pub use models::model::Model;
//pub use models::columns;
pub use models::columns::options::ColumnOptions;
pub use models::{expand_model,expand_default_attrs,get_model_options};
pub use query::*;
pub use models::model::Model;
pub use models::options::ModelOptions;
pub use utils::attrs::{Attr,Attrs};
pub mod models;
pub mod query;
pub mod utils;
| 26.352941 | 75 | 0.748884 |
3abd492a8a7333f05213c63f5925978568f60147 | 4,559 | //! Definition of the [FenRecord] structure to manage a string containing a chess game position
//! in the [Forsyth–Edwards Notation](https://en.wikipedia.org/wiki/Forsyth%E2%80%93Edwards_Notation) (FEN).
//!
// These will be useful later...
//use crate::chessdefines::{ArmyColour};
//use crate::bitboard::BitBoard;
// From Wikipedia (https://en.wikipedia.org/wiki/Forsyth%E2%80%93Edwards_Notation):
// Forsyth–Edwards Notation (FEN) is a standard notation for describing a particular board
// position of a chess game. The purpose of FEN is to provide all the necessary information
// to restart a game from a particular position.
//
// FEN is based on a system developed by Scottish newspaper journalist David Forsyth.
// Forsyth's system became popular in the 19th century; Steven J. Edwards extended it
// to support use by computers. FEN is defined in the "Portable Game Notation
// Specification and Implementation Guide" (see documentation area). In the Portable
// Game Notation for chess games, FEN is used to define initial positions other than
// the standard one.[3] FEN does not provide sufficient information to decide whether
// a draw by threefold repetition may be legally claimed or a draw offer may be
// accepted; for that, a different format such as Extended Position Description is needed.
//
// A FEN "record" defines a particular game position, all in one text line and using
//only the ASCII character set. A text file with only FEN data records should have the
// file extension ".fen".[4]
//
// A FEN record contains six fields. The separator between fields is a space.
// The fields are:
//
// 1. Piece placement (from White's perspective). Each rank is described, starting with
// rank 8 and ending with rank 1; within each rank, the contents of each square are
// described from file "a" through file "h". Following the Standard Algebraic
// Notation (SAN), each piece is identified by a single letter taken from the
// standard English names (pawn = "P", knight = "N", bishop = "B", rook = "R",
// queen = "Q" and king = "K"). White pieces are designated using upper-case letters
// ("PNBRQK") while black pieces use lowercase ("pnbrqk"). Empty squares are noted
// using digits 1 through 8 (the number of empty squares), and "/" separates ranks.
// 2. Active color. "w" means White moves next, "b" means Black moves next.
// 3. Castling availability. If neither side can castle, this is "-". Otherwise, this
// has one or more letters: "K" (White can castle kingside), "Q" (White can castle
// queenside), "k" (Black can castle kingside), and/or "q" (Black can castle
// queenside). A move that temporarily prevents castling does not negate this
// notation.
// 4. En passant target square in algebraic notation. If there's no en passant target
// square, this is "-". If a pawn has just made a two-square move, this is the
// position "behind" the pawn. This is recorded regardless of whether there is a
// pawn in position to make an en passant capture.
// 5. Halfmove clock: This is the number of halfmoves since the last capture or pawn
// advance. The reason for this field is that the value is used in the fifty-move
// rule.
// 6. Fullmove number: The number of the full move. It starts at 1, and is incremented
// after Black's move.
//
// This will be useful later...
// const DELIM: char = ' ';
pub const INITIAL_STANDARD_POSITION: &str = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1";
pub const EMPTY_CHESS_BOARD: &str = "8/8/8/8/8/8/8/8 - - - 0 1";
pub struct FenRecord {
fen: String,
}
impl FenRecord {
    /// Default constructor for the [FenRecord] struct: instantiates the initial standard chess position
pub fn new() -> FenRecord {
FenRecord { fen: INITIAL_STANDARD_POSITION.to_string() }
}
/// Return the current value of the FEN record as a string
/// # Example:
/// ```
/// # use abbadingo::fenrecord::{FenRecord, INITIAL_STANDARD_POSITION};
/// let fr = FenRecord::new();
/// assert_eq!(fr.fen(), INITIAL_STANDARD_POSITION)
///```
pub fn fen(&self) -> &str {
&self.fen
}
}
// ****************************************************************************
// TESTS
// ****************************************************************************
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn default_constructor_instantiate_initial_chess_standard_position() {
let fr = FenRecord::new();
assert_eq!(fr.fen(), INITIAL_STANDARD_POSITION)
}
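    // Hedged sketch (added for illustration; not part of the original file):
    // a FEN record always carries six space-separated fields, so the initial
    // standard position splits into exactly six parts.
    #[test]
    fn fen_record_has_six_space_separated_fields() {
        let fr = FenRecord::new();
        assert_eq!(fr.fen().split(' ').count(), 6);
    }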
} | 47.989474 | 108 | 0.680193 |
1c3442c24bdebdcc41def9a7e9ddd5a32628a670 | 108 | use grpc_lib::server;
#[tokio::main]
async fn main() {
server(([127, 0, 0, 1],19999).into()).await;
}
| 13.5 | 48 | 0.583333 |
186fd06d6c4d4cfb5beb0f9b9014c206b41c570e | 17,963 | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
chained_bft::{
common::{Author, Height, Round},
consensus_types::{
quorum_cert::QuorumCert, vote_data::VoteData, vote_msg::VoteMsgVerificationError,
},
},
state_replication::ExecutedState,
};
use canonical_serialization::{
CanonicalDeserialize, CanonicalSerialize, CanonicalSerializer, SimpleSerializer,
};
use crypto::{
hash::{BlockHasher, CryptoHash, CryptoHasher, GENESIS_BLOCK_ID},
HashValue,
};
use failure::Result;
use mirai_annotations::{assumed_postcondition, checked_precondition, checked_precondition_eq};
use network::proto::Block as ProtoBlock;
use proto_conv::{FromProto, IntoProto};
use rmp_serde::{from_slice, to_vec_named};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::{
collections::HashMap,
convert::TryFrom,
fmt::{Display, Formatter},
};
use types::{
crypto_proxies::{LedgerInfoWithSignatures, Signature, ValidatorSigner, ValidatorVerifier},
ledger_info::LedgerInfo,
};
#[cfg(test)]
#[path = "block_test.rs"]
pub mod block_test;
#[derive(Debug)]
pub enum BlockVerificationError {
/// Block hash is not equal to block id
InvalidBlockId,
/// Round must not be smaller than height and should be higher than parent's round.
InvalidBlockRound,
/// NIL block must not carry payload.
NilBlockWithPayload,
/// QC carried by the block does not certify its own parent.
QCDoesNotCertifyParent,
/// The verification of quorum cert of this block failed.
QCVerificationError(VoteMsgVerificationError),
/// The signature verification of this block failed.
SigVerifyError,
}
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)]
pub enum BlockSource {
Proposal {
/// Author of the block that can be validated by the author's public key and the signature
author: Author,
/// Signature that the hash of this block has been authored by the owner of the private key
signature: Signature,
},
/// NIL blocks don't have authors or signatures: they're generated upon timeouts to fill in the
/// gaps in the rounds.
NilBlock,
}
/// Blocks are managed in a speculative tree, the committed blocks form a chain.
/// Each block must know the id of its parent and keep the QuorumCertificate to that parent.
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)]
pub struct Block<T> {
/// This block's id as a hash value
id: HashValue,
/// Parent block id of this block as a hash value (all zeros to indicate the genesis block)
parent_id: HashValue,
    /// Payload `T` of the block (e.g. one or more transactions)
payload: T,
/// The round of a block is an internal monotonically increasing counter used by Consensus
/// protocol.
round: Round,
/// The height of a block is its position in the chain (block height = parent block height + 1)
height: Height,
/// The approximate physical time a block is proposed by a proposer. This timestamp is used
/// for
/// * Time-dependent logic in smart contracts (the current time of execution)
/// * Clients determining if they are relatively up-to-date with respect to the block chain.
///
/// It makes the following guarantees:
/// 1. Time Monotonicity: Time is monotonically increasing in the block
/// chain. (i.e. If H1 < H2, H1.Time < H2.Time).
/// 2. If a block of transactions B is agreed on with timestamp T, then at least f+1
/// honest replicas think that T is in the past. An honest replica will only vote
/// on a block when its own clock >= timestamp T.
/// 3. If a block of transactions B is agreed on with timestamp T, then at least f+1 honest
/// replicas saw the contents of B no later than T + delta for some delta.
/// If T = 3:00 PM and delta is 10 minutes, then an honest replica would not have
/// voted for B unless its clock was between 3:00 PM to 3:10 PM at the time the
/// proposal was received. After 3:10 PM, an honest replica would no longer vote
/// on B, noting it was too far in the past.
timestamp_usecs: u64,
/// Contains the quorum certified ancestor and whether the quorum certified ancestor was
/// voted on successfully
quorum_cert: QuorumCert,
/// If a block is a real proposal, contains its author and signature.
block_source: BlockSource,
}
impl<T> Display for Block<T> {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
let nil_marker = if self.block_source == BlockSource::NilBlock {
" (NIL)"
} else {
""
};
write!(
f,
"[id: {}{}, round: {:02}, parent_id: {}]",
self.id, nil_marker, self.round, self.parent_id
)
}
}
impl<T> Block<T>
where
T: Serialize + Default + CanonicalSerialize + PartialEq,
{
// Make an empty genesis block
pub fn make_genesis_block() -> Self {
let ancestor_id = HashValue::zero();
let genesis_validator_signer = ValidatorSigner::genesis();
let state_id = ExecutedState::state_for_genesis().state_id;
// Genesis carries a placeholder quorum certificate to its parent id with LedgerInfo
// carrying information about version `0`.
let genesis_quorum_cert = QuorumCert::new(
VoteData::new(ancestor_id, state_id, 0, ancestor_id, 0, ancestor_id, 0),
LedgerInfoWithSignatures::new(
LedgerInfo::new(0, state_id, HashValue::zero(), HashValue::zero(), 0, 0),
HashMap::new(),
),
);
let genesis_id = *GENESIS_BLOCK_ID;
let signature = genesis_validator_signer
.sign_message(genesis_id)
.expect("Failed to sign genesis id.");
Block {
id: genesis_id,
payload: T::default(),
parent_id: HashValue::zero(),
round: 0,
height: 0,
timestamp_usecs: 0, // The beginning of UNIX TIME
quorum_cert: genesis_quorum_cert,
block_source: BlockSource::Proposal {
author: genesis_validator_signer.author(),
signature: signature.into(),
},
}
}
// Create a block directly. Most users should prefer make_block() as it ensures correct block
// chaining. This functionality should typically only be used for testing.
pub fn new_internal(
payload: T,
parent_id: HashValue,
round: Round,
height: Height,
timestamp_usecs: u64,
quorum_cert: QuorumCert,
validator_signer: &ValidatorSigner,
) -> Self {
let block_internal = BlockSerializer {
parent_id,
payload: &payload,
round,
height,
timestamp_usecs,
quorum_cert: &quorum_cert,
author: Some(validator_signer.author()),
};
let id = block_internal.hash();
let signature = validator_signer
.sign_message(id)
.expect("Failed to sign message");
Block {
id,
payload,
parent_id,
round,
height,
timestamp_usecs,
quorum_cert,
block_source: BlockSource::Proposal {
author: validator_signer.author(),
signature: signature.into(),
},
}
}
pub fn make_block(
parent_block: &Block<T>,
payload: T,
round: Round,
timestamp_usecs: u64,
quorum_cert: QuorumCert,
validator_signer: &ValidatorSigner,
) -> Self {
// A block must carry a QC to its parent.
checked_precondition_eq!(quorum_cert.certified_block_id(), parent_block.id());
checked_precondition!(round > parent_block.round());
// This precondition guards the addition overflow caused by passing
// parent_block.height() + 1 to new_internal.
checked_precondition!(parent_block.height() < std::u64::MAX);
Block::new_internal(
payload,
parent_block.id(),
round,
// Height is always parent's height + 1 because it's just the position in the chain.
parent_block.height() + 1,
timestamp_usecs,
quorum_cert,
validator_signer,
)
}
/// The NIL blocks are special: they're not carrying any real payload and are generated
/// independently by different validators just to fill in the round with some QC.
pub fn make_nil_block(parent_block: &Block<T>, round: Round, quorum_cert: QuorumCert) -> Self {
checked_precondition_eq!(quorum_cert.certified_block_id(), parent_block.id());
checked_precondition!(round > parent_block.round());
// This precondition guards the addition overflow caused by using
// parent_block.height() + 1 in the construction of BlockSerializer.
checked_precondition!(parent_block.height() < std::u64::MAX);
let payload = T::default();
// We want all the NIL blocks to agree on the timestamps even though they're generated
// independently by different validators, hence we're using the timestamp of a parent + 1.
// The reason for artificially adding 1 usec is to support execution state synchronization,
        // which doesn't have any other way of determining the order of ledger infos other than
// comparing their timestamps.
let timestamp_usecs = parent_block.timestamp_usecs + 1;
let block_serializer = BlockSerializer {
parent_id: parent_block.id(),
payload: &payload,
round,
height: parent_block.height() + 1,
timestamp_usecs,
quorum_cert: &quorum_cert,
// the author here doesn't really matter for as long as all the NIL Blocks are hashing
// the same value, hence use the special genesis author for hashing.
author: None,
};
let id = block_serializer.hash();
Block {
id,
payload,
parent_id: parent_block.id(),
round,
height: parent_block.height() + 1,
timestamp_usecs,
quorum_cert,
block_source: BlockSource::NilBlock,
}
}
pub fn get_payload(&self) -> &T {
&self.payload
}
pub fn verify(
&self,
validator: &ValidatorVerifier,
) -> ::std::result::Result<(), BlockVerificationError> {
if self.is_genesis_block() {
return Ok(());
}
if self.id() != self.hash() {
return Err(BlockVerificationError::InvalidBlockId);
}
if self.quorum_cert().certified_block_id() != self.parent_id() {
return Err(BlockVerificationError::QCDoesNotCertifyParent);
}
if self.quorum_cert().certified_block_round() >= self.round()
|| self.round() < self.height()
{
return Err(BlockVerificationError::InvalidBlockRound);
}
if let BlockSource::Proposal { author, signature } = &self.block_source {
signature
.verify(validator, *author, self.hash())
.map_err(|_| BlockVerificationError::SigVerifyError)?;
} else if self.payload != T::default() {
// NIL block must not carry payload
return Err(BlockVerificationError::NilBlockWithPayload);
}
self.quorum_cert
.verify(validator)
.map_err(BlockVerificationError::QCVerificationError)
}
pub fn id(&self) -> HashValue {
self.id
}
pub fn parent_id(&self) -> HashValue {
self.parent_id
}
pub fn height(&self) -> Height {
// Height:
// - Reasonable to assume that the height of the block chain will not grow enough to exceed
// std::u64::MAX - 1 in the next million years at least
// - The upper limit of std::u64::MAX - 1 ensures that the parent check doesn't
// cause addition overflow.
// (Block::make_block)
assumed_postcondition!(self.height < std::u64::MAX);
self.height
}
pub fn round(&self) -> Round {
// Round numbers:
// - are reset to 0 periodically.
// - do not exceed std::u64::MAX - 2 per the 3 chain safety rule
// (ConsensusState::commit_rule_for_certified_block)
assumed_postcondition!(self.round < std::u64::MAX - 1);
self.round
}
pub fn timestamp_usecs(&self) -> u64 {
self.timestamp_usecs
}
pub fn quorum_cert(&self) -> &QuorumCert {
&self.quorum_cert
}
pub fn author(&self) -> Option<Author> {
if let BlockSource::Proposal { author, .. } = self.block_source {
Some(author)
} else {
None
}
}
pub fn signature(&self) -> Option<&Signature> {
if let BlockSource::Proposal { signature, .. } = &self.block_source {
Some(signature)
} else {
None
}
}
pub fn is_genesis_block(&self) -> bool {
self.id() == *GENESIS_BLOCK_ID
&& self.payload == T::default()
&& self.parent_id == HashValue::zero()
&& self.round == 0
&& self.height == 0
&& self.timestamp_usecs == 0
}
pub fn is_nil_block(&self) -> bool {
self.block_source == BlockSource::NilBlock
}
}
impl<T> CryptoHash for Block<T>
where
T: canonical_serialization::CanonicalSerialize,
{
type Hasher = BlockHasher;
fn hash(&self) -> HashValue {
// The author value used by NIL blocks for calculating the hash is genesis.
let author = match self.block_source {
BlockSource::Proposal { author, .. } => Some(author),
BlockSource::NilBlock => None,
};
let block_internal = BlockSerializer {
parent_id: self.parent_id,
payload: &self.payload,
round: self.round,
height: self.height,
timestamp_usecs: self.timestamp_usecs,
quorum_cert: &self.quorum_cert,
author,
};
block_internal.hash()
}
}
// Internal use only. Contains all the fields in Block that contribute to the
// computation of the Block Id.
struct BlockSerializer<'a, T> {
parent_id: HashValue,
payload: &'a T,
round: Round,
height: Height,
timestamp_usecs: u64,
quorum_cert: &'a QuorumCert,
author: Option<Author>,
}
impl<'a, T> CryptoHash for BlockSerializer<'a, T>
where
T: CanonicalSerialize,
{
type Hasher = BlockHasher;
fn hash(&self) -> HashValue {
let bytes =
SimpleSerializer::<Vec<u8>>::serialize(self).expect("block serialization failed");
let mut state = Self::Hasher::default();
state.write(bytes.as_ref());
state.finish()
}
}
impl<'a, T> CanonicalSerialize for BlockSerializer<'a, T>
where
T: CanonicalSerialize,
{
fn serialize(&self, serializer: &mut impl CanonicalSerializer) -> Result<()> {
serializer
.encode_u64(self.timestamp_usecs)?
.encode_u64(self.round)?
.encode_u64(self.height)?
.encode_struct(self.payload)?
.encode_bytes(self.parent_id.as_ref())?
.encode_bytes(self.quorum_cert.certified_block_id().as_ref())?
.encode_optional(&self.author)?;
Ok(())
}
}
#[cfg(test)]
impl<T> Block<T>
where
T: Default + Serialize + CanonicalSerialize,
{
// Is this block a parent of the parameter block?
pub fn is_parent_of(&self, block: &Self) -> bool {
block.parent_id == self.id
}
}
impl<T> IntoProto for Block<T>
where
T: Serialize + Default + CanonicalSerialize + PartialEq,
{
type ProtoType = ProtoBlock;
fn into_proto(self) -> Self::ProtoType {
let mut proto = Self::ProtoType::new();
proto.set_timestamp_usecs(self.timestamp_usecs);
proto.set_id(self.id().into());
proto.set_parent_id(self.parent_id().into());
proto.set_payload(
to_vec_named(self.get_payload())
.expect("fail to serialize payload")
.into(),
);
proto.set_round(self.round());
proto.set_height(self.height());
proto.set_quorum_cert(self.quorum_cert().clone().into_proto());
if let BlockSource::Proposal { author, signature } = self.block_source {
let bytes = bytes::Bytes::from(&signature.to_bytes()[..]);
proto.set_signature(bytes);
proto.set_author(author.into());
}
proto
}
}
impl<T> FromProto for Block<T>
where
T: DeserializeOwned + CanonicalDeserialize,
{
type ProtoType = ProtoBlock;
fn from_proto(mut object: Self::ProtoType) -> Result<Self> {
let id = HashValue::from_slice(object.get_id())?;
let parent_id = HashValue::from_slice(object.get_parent_id())?;
let payload = from_slice(object.get_payload())?;
let timestamp_usecs = object.get_timestamp_usecs();
let round = object.get_round();
let height = object.get_height();
let quorum_cert = QuorumCert::from_proto(object.take_quorum_cert())?;
let block_source = if object.get_author().is_empty() {
BlockSource::NilBlock
} else {
BlockSource::Proposal {
author: Author::try_from(object.get_author())?,
signature: Signature::try_from(object.get_signature())?,
}
};
Ok(Block {
id,
parent_id,
payload,
round,
timestamp_usecs,
height,
quorum_cert,
block_source,
})
}
}
| 34.812016 | 99 | 0.609531 |
226ec44119a3e8120e46f2be10d6148996397940 | 14,741 | use super::common::UnusedAccounts;
#[cfg(all(test, RUSTC_WITH_SPECIALIZATION))]
use solana_sdk::abi_example::IgnoreAsHelper;
use {super::*, bincode::config::Options, solana_measure::measure::Measure, std::cell::RefCell};
const MAX_ACCOUNTS_DB_STREAM_SIZE: u64 = MAX_STREAM_SIZE;
type AccountsDbFields = super::AccountsDbFields<SerializableAccountStorageEntry>;
// Serializable version of AccountStorageEntry for snapshot format
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
pub(super) struct SerializableAccountStorageEntry {
id: AppendVecId,
accounts: SerializableAppendVec,
count_and_status: (usize, AccountStorageStatus),
}
#[cfg(all(test, RUSTC_WITH_SPECIALIZATION))]
impl IgnoreAsHelper for SerializableAccountStorageEntry {}
impl From<&AccountStorageEntry> for SerializableAccountStorageEntry {
fn from(rhs: &AccountStorageEntry) -> Self {
Self {
id: rhs.id,
accounts: SerializableAppendVec::from(&rhs.accounts),
..Self::default()
}
}
}
impl Into<AccountStorageEntry> for SerializableAccountStorageEntry {
fn into(self) -> AccountStorageEntry {
AccountStorageEntry::new_empty_map(self.id, self.accounts.current_len)
}
}
// Serializable version of AppendVec for snapshot format
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
struct SerializableAppendVec {
current_len: usize,
}
#[cfg(all(test, RUSTC_WITH_SPECIALIZATION))]
impl IgnoreAsHelper for SerializableAppendVec {}
impl From<&AppendVec> for SerializableAppendVec {
fn from(rhs: &AppendVec) -> SerializableAppendVec {
SerializableAppendVec {
current_len: rhs.len(),
}
}
}
impl Into<AppendVec> for SerializableAppendVec {
fn into(self) -> AppendVec {
AppendVec::new_empty_map(self.current_len)
}
}
// Serializing an AppendVec requires encoding its length as a u64 into an
// eight-byte buffer, which is then itself serialized to the stream
impl Serialize for SerializableAppendVec {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
const LEN: usize = std::mem::size_of::<usize>();
let mut buf = [0u8; LEN];
serialize_into(Cursor::new(&mut buf[..]), &(self.current_len as u64))
.map_err(serde::ser::Error::custom)?;
serializer.serialize_bytes(&buf)
}
}
// Deserializing an AppendVec requires reading back the eight-byte buffer,
// from which the u64 length is then deserialized
impl<'de> Deserialize<'de> for SerializableAppendVec {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
use serde::de::Error;
struct SerializableAppendVecVisitor;
impl<'a> Visitor<'a> for SerializableAppendVecVisitor {
type Value = SerializableAppendVec;
fn expecting(&self, formatter: &mut Formatter) -> FormatResult {
formatter.write_str("Expecting SerializableAppendVec")
}
fn visit_bytes<E>(self, data: &[u8]) -> std::result::Result<Self::Value, E>
where
E: Error,
{
const LEN: u64 = std::mem::size_of::<usize>() as u64;
let mut rd = Cursor::new(&data[..]);
let current_len: usize = deserialize_from(&mut rd).map_err(Error::custom)?;
if rd.position() != LEN {
Err(Error::custom("SerializableAppendVec: unexpected length"))
} else {
Ok(SerializableAppendVec { current_len })
}
}
}
deserializer.deserialize_bytes(SerializableAppendVecVisitor)
}
}
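// Hedged round-trip sketch (added for illustration; not part of the original
// source): the length is written as a fixed eight-byte payload inside a
// serialized byte buffer, so a bincode serialize/deserialize round trip
// should preserve `current_len`.
#[cfg(test)]
#[test]
fn serializable_append_vec_round_trip_sketch() {
    let original = SerializableAppendVec { current_len: 42 };
    let bytes = bincode::serialize(&original).expect("serialization failed");
    let restored: SerializableAppendVec =
        bincode::deserialize(&bytes).expect("deserialization failed");
    assert_eq!(original, restored);
}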
// Deserializable version of Bank, which need not be serializable because
// serialization is handled by SerializableVersionedBank.
// So, keep its fields in sync with it!
#[derive(Clone, Deserialize)]
pub(crate) struct DeserializableVersionedBank {
pub(crate) blockhash_queue: BlockhashQueue,
pub(crate) ancestors: Ancestors,
pub(crate) hash: Hash,
pub(crate) parent_hash: Hash,
pub(crate) parent_slot: Slot,
pub(crate) hard_forks: HardForks,
pub(crate) transaction_count: u64,
pub(crate) tick_height: u64,
pub(crate) signature_count: u64,
pub(crate) capitalization: u64,
pub(crate) max_tick_height: u64,
pub(crate) hashes_per_tick: Option<u64>,
pub(crate) ticks_per_slot: u64,
pub(crate) ns_per_slot: u128,
pub(crate) genesis_creation_time: UnixTimestamp,
pub(crate) slots_per_year: f64,
pub(crate) unused: u64,
pub(crate) slot: Slot,
pub(crate) epoch: Epoch,
pub(crate) block_height: u64,
pub(crate) collector_id: Pubkey,
pub(crate) collector_fees: u64,
pub(crate) fee_calculator: FeeCalculator,
pub(crate) fee_rate_governor: FeeRateGovernor,
pub(crate) collected_rent: u64,
pub(crate) rent_collector: RentCollector,
pub(crate) epoch_schedule: EpochSchedule,
pub(crate) inflation: Inflation,
pub(crate) stakes: Stakes,
pub(crate) unused_accounts: UnusedAccounts,
pub(crate) epoch_stakes: HashMap<Epoch, EpochStakes>,
pub(crate) is_delta: bool,
pub(crate) message_processor: MessageProcessor,
}
impl Into<BankFieldsToDeserialize> for DeserializableVersionedBank {
fn into(self) -> BankFieldsToDeserialize {
BankFieldsToDeserialize {
blockhash_queue: self.blockhash_queue,
ancestors: self.ancestors,
hash: self.hash,
parent_hash: self.parent_hash,
parent_slot: self.parent_slot,
hard_forks: self.hard_forks,
transaction_count: self.transaction_count,
tick_height: self.tick_height,
signature_count: self.signature_count,
capitalization: self.capitalization,
max_tick_height: self.max_tick_height,
hashes_per_tick: self.hashes_per_tick,
ticks_per_slot: self.ticks_per_slot,
ns_per_slot: self.ns_per_slot,
genesis_creation_time: self.genesis_creation_time,
slots_per_year: self.slots_per_year,
unused: self.unused,
slot: self.slot,
epoch: self.epoch,
block_height: self.block_height,
collector_id: self.collector_id,
collector_fees: self.collector_fees,
fee_calculator: self.fee_calculator,
fee_rate_governor: self.fee_rate_governor,
collected_rent: self.collected_rent,
rent_collector: self.rent_collector,
epoch_schedule: self.epoch_schedule,
inflation: self.inflation,
stakes: self.stakes,
epoch_stakes: self.epoch_stakes,
is_delta: self.is_delta,
}
}
}
// Serializable version of Bank, not deserializable; it holds refs to avoid cloning.
// Keep its fields in sync with DeserializableVersionedBank!
#[derive(Serialize)]
pub(crate) struct SerializableVersionedBank<'a> {
pub(crate) blockhash_queue: &'a RwLock<BlockhashQueue>,
pub(crate) ancestors: &'a Ancestors,
pub(crate) hash: Hash,
pub(crate) parent_hash: Hash,
pub(crate) parent_slot: Slot,
pub(crate) hard_forks: &'a RwLock<HardForks>,
pub(crate) transaction_count: u64,
pub(crate) tick_height: u64,
pub(crate) signature_count: u64,
pub(crate) capitalization: u64,
pub(crate) max_tick_height: u64,
pub(crate) hashes_per_tick: Option<u64>,
pub(crate) ticks_per_slot: u64,
pub(crate) ns_per_slot: u128,
pub(crate) genesis_creation_time: UnixTimestamp,
pub(crate) slots_per_year: f64,
pub(crate) unused: u64,
pub(crate) slot: Slot,
pub(crate) epoch: Epoch,
pub(crate) block_height: u64,
pub(crate) collector_id: Pubkey,
pub(crate) collector_fees: u64,
pub(crate) fee_calculator: FeeCalculator,
pub(crate) fee_rate_governor: FeeRateGovernor,
pub(crate) collected_rent: u64,
pub(crate) rent_collector: RentCollector,
pub(crate) epoch_schedule: EpochSchedule,
pub(crate) inflation: Inflation,
pub(crate) stakes: &'a RwLock<Stakes>,
pub(crate) unused_accounts: UnusedAccounts,
pub(crate) epoch_stakes: &'a HashMap<Epoch, EpochStakes>,
pub(crate) is_delta: bool,
pub(crate) message_processor: MessageProcessor,
}
use std::sync::RwLock;
impl<'a> From<crate::bank::BankFieldsToSerialize<'a>> for SerializableVersionedBank<'a> {
fn from(rhs: crate::bank::BankFieldsToSerialize<'a>) -> Self {
fn new<T: Default>() -> T {
T::default()
}
Self {
blockhash_queue: rhs.blockhash_queue,
ancestors: rhs.ancestors,
hash: rhs.hash,
parent_hash: rhs.parent_hash,
parent_slot: rhs.parent_slot,
hard_forks: rhs.hard_forks,
transaction_count: rhs.transaction_count,
tick_height: rhs.tick_height,
signature_count: rhs.signature_count,
capitalization: rhs.capitalization,
max_tick_height: rhs.max_tick_height,
hashes_per_tick: rhs.hashes_per_tick,
ticks_per_slot: rhs.ticks_per_slot,
ns_per_slot: rhs.ns_per_slot,
genesis_creation_time: rhs.genesis_creation_time,
slots_per_year: rhs.slots_per_year,
unused: rhs.unused,
slot: rhs.slot,
epoch: rhs.epoch,
block_height: rhs.block_height,
collector_id: rhs.collector_id,
collector_fees: rhs.collector_fees,
fee_calculator: rhs.fee_calculator,
fee_rate_governor: rhs.fee_rate_governor,
collected_rent: rhs.collected_rent,
rent_collector: rhs.rent_collector,
epoch_schedule: rhs.epoch_schedule,
inflation: rhs.inflation,
stakes: rhs.stakes,
unused_accounts: new(),
epoch_stakes: rhs.epoch_stakes,
is_delta: rhs.is_delta,
message_processor: new(),
}
}
}
#[cfg(RUSTC_WITH_SPECIALIZATION)]
impl<'a> IgnoreAsHelper for SerializableVersionedBank<'a> {}
pub(super) struct Context {}
impl<'a> TypeContext<'a> for Context {
type SerializableAccountStorageEntry = SerializableAccountStorageEntry;
fn serialize_bank_and_storage<S: serde::ser::Serializer>(
serializer: S,
serializable_bank_and_storage: &SerializableBankAndStorage<'a, Self>,
) -> std::result::Result<S::Ok, S::Error>
where
Self: std::marker::Sized,
{
(
SerializableVersionedBank::from(
serializable_bank_and_storage.bank.get_fields_to_serialize(),
),
SerializableAccountsDB::<'a, Self> {
accounts_db: &*serializable_bank_and_storage.bank.rc.accounts.accounts_db,
slot: serializable_bank_and_storage.bank.rc.slot,
account_storage_entries: serializable_bank_and_storage.snapshot_storages,
phantom: std::marker::PhantomData::default(),
},
)
.serialize(serializer)
}
fn serialize_accounts_db_fields<S: serde::ser::Serializer>(
serializer: S,
serializable_db: &SerializableAccountsDB<'a, Self>,
) -> std::result::Result<S::Ok, S::Error>
where
Self: std::marker::Sized,
{
// sample write version before serializing storage entries
let version = serializable_db
.accounts_db
.write_version
.load(Ordering::Relaxed);
// (1st of 3 elements) write the list of account storage entry lists out as a map
let entry_count = RefCell::<usize>::new(0);
let entries =
serialize_iter_as_map(serializable_db.account_storage_entries.iter().map(|x| {
*entry_count.borrow_mut() += x.len();
(
x.first().unwrap().slot,
serialize_iter_as_seq(
x.iter()
.map(|x| Self::SerializableAccountStorageEntry::from(x.as_ref())),
),
)
}));
let slot_hash = (
serializable_db.slot,
serializable_db
.accounts_db
.bank_hashes
.read()
.unwrap()
.get(&serializable_db.slot)
.unwrap_or_else(|| panic!("No bank_hashes entry for slot {}", serializable_db.slot))
.clone(),
);
// as there is no deserialize_bank_rc_fields(), emit two u64 size fields here instead
let mut serialize_account_storage_timer = Measure::start("serialize_account_storage_ms");
let result = (
&MAX_ACCOUNTS_DB_STREAM_SIZE,
&MAX_ACCOUNTS_DB_STREAM_SIZE,
&entries,
&version,
&slot_hash,
)
.serialize(serializer);
serialize_account_storage_timer.stop();
datapoint_info!(
"serialize_account_storage_ms",
("duration", serialize_account_storage_timer.as_ms(), i64),
("num_entries", *entry_count.borrow(), i64),
);
result
}
fn deserialize_bank_fields<R>(
mut stream: &mut BufReader<R>,
) -> Result<(BankFieldsToDeserialize, AccountsDbFields), Error>
where
R: Read,
{
let bank_fields = deserialize_from::<_, DeserializableVersionedBank>(&mut stream)?.into();
let accounts_db_fields = Self::deserialize_accounts_db_fields(stream)?;
Ok((bank_fields, accounts_db_fields))
}
fn deserialize_accounts_db_fields<R>(
mut stream: &mut BufReader<R>,
) -> Result<AccountsDbFields, Error>
where
R: Read,
{
        // read two u64 byte-vector lengths, used only to bound the stream size
let serialized_len = MAX_ACCOUNTS_DB_STREAM_SIZE;
let serialized_len = min(serialized_len, deserialize_from(&mut stream)?);
let serialized_len = min(serialized_len, deserialize_from(&mut stream)?);
// (1st of 3 elements) read in map of slots to account storage entries
let storage: HashMap<Slot, Vec<Self::SerializableAccountStorageEntry>> = bincode::options()
.with_limit(serialized_len)
.with_fixint_encoding()
.allow_trailing_bytes()
.deserialize_from(&mut stream)?;
// (2nd of 3 elements) read in write version
let version: u64 = deserialize_from(&mut stream)?;
// (3rd of 3 elements) read in (slot, bank hashes) pair
let (slot, bank_hash_info): (Slot, BankHashInfo) = deserialize_from(&mut stream)?;
Ok(AccountsDbFields(storage, version, slot, bank_hash_info))
}
}
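// Hedged aside (added for illustration; not part of the original source): the
// `RefCell<usize>` entry counter in `serialize_accounts_db_fields` is shared
// between the map-serialization closure and the datapoint logging that runs
// afterwards; interior mutability lets the closure bump the count through a
// shared borrow.
#[cfg(test)]
#[test]
fn refcell_counting_through_shared_closure_sketch() {
    let count = RefCell::new(0usize);
    let bump = |n: usize| *count.borrow_mut() += n;
    bump(2);
    bump(3);
    assert_eq!(*count.borrow(), 5);
}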
| 37.604592 | 100 | 0.641748 |
8f895c33a63280f6e376426ae889f79ab9e7fc1d | 42,747 | //! Definitions of integer that is known not to equal zero.
use crate::fmt;
use crate::ops::{BitOr, BitOrAssign, Div, Rem};
use crate::str::FromStr;
use super::from_str_radix;
use super::{IntErrorKind, ParseIntError};
use crate::intrinsics;
macro_rules! impl_nonzero_fmt {
( #[$stability: meta] ( $( $Trait: ident ),+ ) for $Ty: ident ) => {
$(
#[$stability]
impl fmt::$Trait for $Ty {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.get().fmt(f)
}
}
)+
}
}
macro_rules! nonzero_integers {
( $( #[$stability: meta] #[$const_new_unchecked_stability: meta] $Ty: ident($Int: ty); )+ ) => {
$(
/// An integer that is known not to equal zero.
///
/// This enables some memory layout optimization.
#[doc = concat!("For example, `Option<", stringify!($Ty), ">` is the same size as `", stringify!($Int), "`:")]
///
/// ```rust
/// use std::mem::size_of;
#[doc = concat!("assert_eq!(size_of::<Option<core::num::", stringify!($Ty), ">>(), size_of::<", stringify!($Int), ">());")]
/// ```
#[$stability]
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[repr(transparent)]
#[rustc_layout_scalar_valid_range_start(1)]
#[rustc_nonnull_optimization_guaranteed]
pub struct $Ty($Int);
impl $Ty {
/// Creates a non-zero without checking whether the value is non-zero.
/// This results in undefined behaviour if the value is zero.
///
/// # Safety
///
/// The value must not be zero.
#[$stability]
#[$const_new_unchecked_stability]
#[must_use]
#[inline]
pub const unsafe fn new_unchecked(n: $Int) -> Self {
// SAFETY: this is guaranteed to be safe by the caller.
unsafe { Self(n) }
}
/// Creates a non-zero if the given value is not zero.
#[$stability]
#[rustc_const_stable(feature = "const_nonzero_int_methods", since = "1.47.0")]
#[must_use]
#[inline]
pub const fn new(n: $Int) -> Option<Self> {
if n != 0 {
// SAFETY: we just checked that there's no `0`
Some(unsafe { Self(n) })
} else {
None
}
}
/// Returns the value as a primitive type.
#[$stability]
#[inline]
#[rustc_const_stable(feature = "nonzero", since = "1.34.0")]
pub const fn get(self) -> $Int {
self.0
}
}
#[stable(feature = "from_nonzero", since = "1.31.0")]
#[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")]
impl const From<$Ty> for $Int {
#[doc = concat!("Converts a `", stringify!($Ty), "` into an `", stringify!($Int), "`")]
#[inline]
fn from(nonzero: $Ty) -> Self {
nonzero.0
}
}
#[stable(feature = "nonzero_bitor", since = "1.45.0")]
#[rustc_const_unstable(feature = "const_ops", issue = "90080")]
impl const BitOr for $Ty {
type Output = Self;
#[inline]
fn bitor(self, rhs: Self) -> Self::Output {
// SAFETY: since `self` and `rhs` are both nonzero, the
// result of the bitwise-or will be nonzero.
unsafe { $Ty::new_unchecked(self.get() | rhs.get()) }
}
}
#[stable(feature = "nonzero_bitor", since = "1.45.0")]
#[rustc_const_unstable(feature = "const_ops", issue = "90080")]
impl const BitOr<$Int> for $Ty {
type Output = Self;
#[inline]
fn bitor(self, rhs: $Int) -> Self::Output {
// SAFETY: since `self` is nonzero, the result of the
// bitwise-or will be nonzero regardless of the value of
// `rhs`.
unsafe { $Ty::new_unchecked(self.get() | rhs) }
}
}
#[stable(feature = "nonzero_bitor", since = "1.45.0")]
#[rustc_const_unstable(feature = "const_ops", issue = "90080")]
impl const BitOr<$Ty> for $Int {
type Output = $Ty;
#[inline]
fn bitor(self, rhs: $Ty) -> Self::Output {
// SAFETY: since `rhs` is nonzero, the result of the
// bitwise-or will be nonzero regardless of the value of
// `self`.
unsafe { $Ty::new_unchecked(self | rhs.get()) }
}
}
#[stable(feature = "nonzero_bitor", since = "1.45.0")]
#[rustc_const_unstable(feature = "const_ops", issue = "90080")]
impl const BitOrAssign for $Ty {
#[inline]
fn bitor_assign(&mut self, rhs: Self) {
*self = *self | rhs;
}
}
#[stable(feature = "nonzero_bitor", since = "1.45.0")]
#[rustc_const_unstable(feature = "const_ops", issue = "90080")]
impl const BitOrAssign<$Int> for $Ty {
#[inline]
fn bitor_assign(&mut self, rhs: $Int) {
*self = *self | rhs;
}
}
impl_nonzero_fmt! {
#[$stability] (Debug, Display, Binary, Octal, LowerHex, UpperHex) for $Ty
}
)+
}
}
nonzero_integers! {
#[stable(feature = "nonzero", since = "1.28.0")] #[rustc_const_stable(feature = "nonzero", since = "1.28.0")] NonZeroU8(u8);
#[stable(feature = "nonzero", since = "1.28.0")] #[rustc_const_stable(feature = "nonzero", since = "1.28.0")] NonZeroU16(u16);
#[stable(feature = "nonzero", since = "1.28.0")] #[rustc_const_stable(feature = "nonzero", since = "1.28.0")] NonZeroU32(u32);
#[stable(feature = "nonzero", since = "1.28.0")] #[rustc_const_stable(feature = "nonzero", since = "1.28.0")] NonZeroU64(u64);
#[stable(feature = "nonzero", since = "1.28.0")] #[rustc_const_stable(feature = "nonzero", since = "1.28.0")] NonZeroU128(u128);
#[stable(feature = "nonzero", since = "1.28.0")] #[rustc_const_stable(feature = "nonzero", since = "1.28.0")] NonZeroUsize(usize);
#[stable(feature = "signed_nonzero", since = "1.34.0")] #[rustc_const_stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI8(i8);
#[stable(feature = "signed_nonzero", since = "1.34.0")] #[rustc_const_stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI16(i16);
#[stable(feature = "signed_nonzero", since = "1.34.0")] #[rustc_const_stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI32(i32);
#[stable(feature = "signed_nonzero", since = "1.34.0")] #[rustc_const_stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI64(i64);
#[stable(feature = "signed_nonzero", since = "1.34.0")] #[rustc_const_stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI128(i128);
#[stable(feature = "signed_nonzero", since = "1.34.0")] #[rustc_const_stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroIsize(isize);
}
macro_rules! from_str_radix_nzint_impl {
($($t:ty)*) => {$(
#[stable(feature = "nonzero_parse", since = "1.35.0")]
impl FromStr for $t {
type Err = ParseIntError;
fn from_str(src: &str) -> Result<Self, Self::Err> {
Self::new(from_str_radix(src, 10)?)
.ok_or(ParseIntError {
kind: IntErrorKind::Zero
})
}
}
)*}
}
from_str_radix_nzint_impl! { NonZeroU8 NonZeroU16 NonZeroU32 NonZeroU64 NonZeroU128 NonZeroUsize
NonZeroI8 NonZeroI16 NonZeroI32 NonZeroI64 NonZeroI128 NonZeroIsize }
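// Hedged usage sketch (added for illustration; not part of the original
// source, and it assumes a crate context where inline unit tests are
// allowed): `FromStr` parses through `from_str_radix` and then `new`, so
// parsing "0" fails with `IntErrorKind::Zero` instead of yielding a value.
#[cfg(test)]
#[test]
fn from_str_rejects_zero_sketch() {
    assert_eq!("7".parse::<NonZeroU8>().map(NonZeroU8::get), Ok(7));
    assert!("0".parse::<NonZeroU8>().is_err());
}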
macro_rules! nonzero_leading_trailing_zeros {
( $( $Ty: ident($Uint: ty) , $LeadingTestExpr:expr ;)+ ) => {
$(
impl $Ty {
/// Returns the number of leading zeros in the binary representation of `self`.
///
/// On many architectures, this function can perform better than `leading_zeros()` on the underlying integer type, as special handling of zero can be avoided.
///
/// # Examples
///
/// Basic usage:
///
/// ```
#[doc = concat!("let n = std::num::", stringify!($Ty), "::new(", stringify!($LeadingTestExpr), ").unwrap();")]
///
/// assert_eq!(n.leading_zeros(), 0);
/// ```
#[stable(feature = "nonzero_leading_trailing_zeros", since = "1.53.0")]
#[rustc_const_stable(feature = "nonzero_leading_trailing_zeros", since = "1.53.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn leading_zeros(self) -> u32 {
// SAFETY: since `self` can not be zero it is safe to call ctlz_nonzero
unsafe { intrinsics::ctlz_nonzero(self.0 as $Uint) as u32 }
}
/// Returns the number of trailing zeros in the binary representation
/// of `self`.
///
/// On many architectures, this function can perform better than `trailing_zeros()` on the underlying integer type, as special handling of zero can be avoided.
///
/// # Examples
///
/// Basic usage:
///
/// ```
#[doc = concat!("let n = std::num::", stringify!($Ty), "::new(0b0101000).unwrap();")]
///
/// assert_eq!(n.trailing_zeros(), 3);
/// ```
#[stable(feature = "nonzero_leading_trailing_zeros", since = "1.53.0")]
#[rustc_const_stable(feature = "nonzero_leading_trailing_zeros", since = "1.53.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn trailing_zeros(self) -> u32 {
// SAFETY: since `self` can not be zero it is safe to call cttz_nonzero
unsafe { intrinsics::cttz_nonzero(self.0 as $Uint) as u32 }
}
}
)+
}
}
nonzero_leading_trailing_zeros! {
NonZeroU8(u8), u8::MAX;
NonZeroU16(u16), u16::MAX;
NonZeroU32(u32), u32::MAX;
NonZeroU64(u64), u64::MAX;
NonZeroU128(u128), u128::MAX;
NonZeroUsize(usize), usize::MAX;
NonZeroI8(u8), -1i8;
NonZeroI16(u16), -1i16;
NonZeroI32(u32), -1i32;
NonZeroI64(u64), -1i64;
NonZeroI128(u128), -1i128;
NonZeroIsize(usize), -1isize;
}
macro_rules! nonzero_integers_div {
( $( $Ty: ident($Int: ty); )+ ) => {
$(
#[stable(feature = "nonzero_div", since = "1.51.0")]
#[rustc_const_unstable(feature = "const_ops", issue = "90080")]
impl const Div<$Ty> for $Int {
type Output = $Int;
/// This operation rounds towards zero,
/// truncating any fractional part of the exact result, and cannot panic.
#[inline]
fn div(self, other: $Ty) -> $Int {
// SAFETY: div by zero is checked because `other` is a nonzero,
// and MIN/-1 is checked because `self` is an unsigned int.
unsafe { crate::intrinsics::unchecked_div(self, other.get()) }
}
}
#[stable(feature = "nonzero_div", since = "1.51.0")]
#[rustc_const_unstable(feature = "const_ops", issue = "90080")]
impl const Rem<$Ty> for $Int {
type Output = $Int;
/// This operation satisfies `n % d == n - (n / d) * d`, and cannot panic.
#[inline]
fn rem(self, other: $Ty) -> $Int {
// SAFETY: rem by zero is checked because `other` is a nonzero,
// and MIN/-1 is checked because `self` is an unsigned int.
unsafe { crate::intrinsics::unchecked_rem(self, other.get()) }
}
}
)+
}
}
nonzero_integers_div! {
NonZeroU8(u8);
NonZeroU16(u16);
NonZeroU32(u32);
NonZeroU64(u64);
NonZeroU128(u128);
NonZeroUsize(usize);
}
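// Hedged usage sketch (added for illustration; not part of the original
// source, and it assumes a crate context where inline unit tests are
// allowed): dividing by a `NonZero*` divisor needs no zero check, and the
// remainder satisfies `n % d == n - (n / d) * d`.
#[cfg(test)]
#[test]
fn div_rem_by_nonzero_sketch() {
    let d = NonZeroU32::new(3).unwrap();
    let n = 10u32;
    assert_eq!(n / d, 3);
    assert_eq!(n % d, 1);
    assert_eq!(n % d, n - (n / d) * d.get());
}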
// A bunch of methods for unsigned nonzero types only.
macro_rules! nonzero_unsigned_operations {
( $( $Ty: ident($Int: ty); )+ ) => {
$(
impl $Ty {
                /// Adds an unsigned integer to a non-zero value.
                /// Checks for overflow and returns [`None`] on overflow.
                /// As a consequence, the result cannot wrap to zero.
                ///
/// # Examples
///
/// ```
/// #![feature(nonzero_ops)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let one = ", stringify!($Ty), "::new(1)?;")]
#[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
#[doc = concat!("let max = ", stringify!($Ty), "::new(",
stringify!($Int), "::MAX)?;")]
///
/// assert_eq!(Some(two), one.checked_add(1));
/// assert_eq!(None, max.checked_add(1));
/// # Some(())
/// # }
/// ```
#[unstable(feature = "nonzero_ops", issue = "84186")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn checked_add(self, other: $Int) -> Option<$Ty> {
if let Some(result) = self.get().checked_add(other) {
// SAFETY: $Int::checked_add returns None on overflow
// so the result cannot be zero.
Some(unsafe { $Ty::new_unchecked(result) })
} else {
None
}
}
                /// Adds an unsigned integer to a non-zero value.
                #[doc = concat!("Returns [`", stringify!($Int), "::MAX`] on overflow.")]
///
/// # Examples
///
/// ```
/// #![feature(nonzero_ops)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let one = ", stringify!($Ty), "::new(1)?;")]
#[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
#[doc = concat!("let max = ", stringify!($Ty), "::new(",
stringify!($Int), "::MAX)?;")]
///
/// assert_eq!(two, one.saturating_add(1));
/// assert_eq!(max, max.saturating_add(1));
/// # Some(())
/// # }
/// ```
#[unstable(feature = "nonzero_ops", issue = "84186")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn saturating_add(self, other: $Int) -> $Ty {
// SAFETY: $Int::saturating_add returns $Int::MAX on overflow
// so the result cannot be zero.
unsafe { $Ty::new_unchecked(self.get().saturating_add(other)) }
}
                /// Adds an unsigned integer to a non-zero value,
/// assuming overflow cannot occur.
/// Overflow is unchecked, and it is undefined behaviour to overflow
/// *even if the result would wrap to a non-zero value*.
/// The behaviour is undefined as soon as
#[doc = concat!("`self + rhs > ", stringify!($Int), "::MAX`.")]
///
/// # Examples
///
/// ```
/// #![feature(nonzero_ops)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let one = ", stringify!($Ty), "::new(1)?;")]
#[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
///
/// assert_eq!(two, unsafe { one.unchecked_add(1) });
/// # Some(())
/// # }
/// ```
#[unstable(feature = "nonzero_ops", issue = "84186")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const unsafe fn unchecked_add(self, other: $Int) -> $Ty {
// SAFETY: The caller ensures there is no overflow.
unsafe { $Ty::new_unchecked(self.get().unchecked_add(other)) }
}
                /// Returns the smallest power of two greater than or equal to `n`.
                /// Checks for overflow and returns [`None`]
/// if the next power of two is greater than the type’s maximum value.
/// As a consequence, the result cannot wrap to zero.
///
/// # Examples
///
/// ```
/// #![feature(nonzero_ops)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
#[doc = concat!("let three = ", stringify!($Ty), "::new(3)?;")]
#[doc = concat!("let four = ", stringify!($Ty), "::new(4)?;")]
#[doc = concat!("let max = ", stringify!($Ty), "::new(",
stringify!($Int), "::MAX)?;")]
///
/// assert_eq!(Some(two), two.checked_next_power_of_two() );
/// assert_eq!(Some(four), three.checked_next_power_of_two() );
/// assert_eq!(None, max.checked_next_power_of_two() );
/// # Some(())
/// # }
/// ```
#[unstable(feature = "nonzero_ops", issue = "84186")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn checked_next_power_of_two(self) -> Option<$Ty> {
if let Some(nz) = self.get().checked_next_power_of_two() {
// SAFETY: The next power of two is positive
// and overflow is checked.
Some(unsafe { $Ty::new_unchecked(nz) })
} else {
None
}
}
}
)+
}
}
nonzero_unsigned_operations! {
NonZeroU8(u8);
NonZeroU16(u16);
NonZeroU32(u32);
NonZeroU64(u64);
NonZeroU128(u128);
NonZeroUsize(usize);
}
// A bunch of methods for signed nonzero types only.
macro_rules! nonzero_signed_operations {
( $( $Ty: ident($Int: ty) -> $Uty: ident($Uint: ty); )+ ) => {
$(
impl $Ty {
/// Computes the absolute value of self.
#[doc = concat!("See [`", stringify!($Int), "::abs`]")]
/// for documentation on overflow behaviour.
///
/// # Example
///
/// ```
/// #![feature(nonzero_ops)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
#[doc = concat!("let neg = ", stringify!($Ty), "::new(-1)?;")]
///
/// assert_eq!(pos, pos.abs());
/// assert_eq!(pos, neg.abs());
/// # Some(())
/// # }
/// ```
#[unstable(feature = "nonzero_ops", issue = "84186")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn abs(self) -> $Ty {
// SAFETY: This cannot overflow to zero.
unsafe { $Ty::new_unchecked(self.get().abs()) }
}
/// Checked absolute value.
                /// Checks for overflow and returns [`None`] if
#[doc = concat!("`self == ", stringify!($Int), "::MIN`.")]
/// The result cannot be zero.
///
/// # Example
///
/// ```
/// #![feature(nonzero_ops)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
#[doc = concat!("let neg = ", stringify!($Ty), "::new(-1)?;")]
#[doc = concat!("let min = ", stringify!($Ty), "::new(",
stringify!($Int), "::MIN)?;")]
///
/// assert_eq!(Some(pos), neg.checked_abs());
/// assert_eq!(None, min.checked_abs());
/// # Some(())
/// # }
/// ```
#[unstable(feature = "nonzero_ops", issue = "84186")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn checked_abs(self) -> Option<$Ty> {
if let Some(nz) = self.get().checked_abs() {
// SAFETY: absolute value of nonzero cannot yield zero values.
Some(unsafe { $Ty::new_unchecked(nz) })
} else {
None
}
}
/// Computes the absolute value of self,
/// with overflow information, see
#[doc = concat!("[`", stringify!($Int), "::overflowing_abs`].")]
///
/// # Example
///
/// ```
/// #![feature(nonzero_ops)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
#[doc = concat!("let neg = ", stringify!($Ty), "::new(-1)?;")]
#[doc = concat!("let min = ", stringify!($Ty), "::new(",
stringify!($Int), "::MIN)?;")]
///
/// assert_eq!((pos, false), pos.overflowing_abs());
/// assert_eq!((pos, false), neg.overflowing_abs());
/// assert_eq!((min, true), min.overflowing_abs());
/// # Some(())
/// # }
/// ```
#[unstable(feature = "nonzero_ops", issue = "84186")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn overflowing_abs(self) -> ($Ty, bool) {
let (nz, flag) = self.get().overflowing_abs();
(
// SAFETY: absolute value of nonzero cannot yield zero values.
unsafe { $Ty::new_unchecked(nz) },
flag,
)
}
/// Saturating absolute value, see
#[doc = concat!("[`", stringify!($Int), "::saturating_abs`].")]
///
/// # Example
///
/// ```
/// #![feature(nonzero_ops)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
#[doc = concat!("let neg = ", stringify!($Ty), "::new(-1)?;")]
#[doc = concat!("let min = ", stringify!($Ty), "::new(",
stringify!($Int), "::MIN)?;")]
#[doc = concat!("let min_plus = ", stringify!($Ty), "::new(",
stringify!($Int), "::MIN + 1)?;")]
#[doc = concat!("let max = ", stringify!($Ty), "::new(",
stringify!($Int), "::MAX)?;")]
///
/// assert_eq!(pos, pos.saturating_abs());
/// assert_eq!(pos, neg.saturating_abs());
/// assert_eq!(max, min.saturating_abs());
/// assert_eq!(max, min_plus.saturating_abs());
/// # Some(())
/// # }
/// ```
#[unstable(feature = "nonzero_ops", issue = "84186")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn saturating_abs(self) -> $Ty {
// SAFETY: absolute value of nonzero cannot yield zero values.
unsafe { $Ty::new_unchecked(self.get().saturating_abs()) }
}
/// Wrapping absolute value, see
#[doc = concat!("[`", stringify!($Int), "::wrapping_abs`].")]
///
/// # Example
///
/// ```
/// #![feature(nonzero_ops)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
#[doc = concat!("let neg = ", stringify!($Ty), "::new(-1)?;")]
#[doc = concat!("let min = ", stringify!($Ty), "::new(",
stringify!($Int), "::MIN)?;")]
#[doc = concat!("let max = ", stringify!($Ty), "::new(",
stringify!($Int), "::MAX)?;")]
///
/// assert_eq!(pos, pos.wrapping_abs());
/// assert_eq!(pos, neg.wrapping_abs());
/// assert_eq!(min, min.wrapping_abs());
/// # // FIXME: add once Neg is implemented?
/// # // assert_eq!(max, (-max).wrapping_abs());
/// # Some(())
/// # }
/// ```
#[unstable(feature = "nonzero_ops", issue = "84186")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn wrapping_abs(self) -> $Ty {
// SAFETY: absolute value of nonzero cannot yield zero values.
unsafe { $Ty::new_unchecked(self.get().wrapping_abs()) }
}
/// Computes the absolute value of self
/// without any wrapping or panicking.
///
/// # Example
///
/// ```
/// #![feature(nonzero_ops)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
#[doc = concat!("# use std::num::", stringify!($Uty), ";")]
///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let u_pos = ", stringify!($Uty), "::new(1)?;")]
#[doc = concat!("let i_pos = ", stringify!($Ty), "::new(1)?;")]
#[doc = concat!("let i_neg = ", stringify!($Ty), "::new(-1)?;")]
#[doc = concat!("let i_min = ", stringify!($Ty), "::new(",
stringify!($Int), "::MIN)?;")]
#[doc = concat!("let u_max = ", stringify!($Uty), "::new(",
stringify!($Uint), "::MAX / 2 + 1)?;")]
///
/// assert_eq!(u_pos, i_pos.unsigned_abs());
/// assert_eq!(u_pos, i_neg.unsigned_abs());
/// assert_eq!(u_max, i_min.unsigned_abs());
/// # Some(())
/// # }
/// ```
#[unstable(feature = "nonzero_ops", issue = "84186")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn unsigned_abs(self) -> $Uty {
// SAFETY: absolute value of nonzero cannot yield zero values.
unsafe { $Uty::new_unchecked(self.get().unsigned_abs()) }
}
}
)+
}
}
nonzero_signed_operations! {
NonZeroI8(i8) -> NonZeroU8(u8);
NonZeroI16(i16) -> NonZeroU16(u16);
NonZeroI32(i32) -> NonZeroU32(u32);
NonZeroI64(i64) -> NonZeroU64(u64);
NonZeroI128(i128) -> NonZeroU128(u128);
NonZeroIsize(isize) -> NonZeroUsize(usize);
}
// A bunch of methods for both signed and unsigned nonzero types.
macro_rules! nonzero_unsigned_signed_operations {
( $( $signedness:ident $Ty: ident($Int: ty); )+ ) => {
$(
impl $Ty {
/// Multiply two non-zero integers together.
/// Check for overflow and return [`None`] on overflow.
/// As a consequence, the result cannot wrap to zero.
///
/// # Examples
///
/// ```
/// #![feature(nonzero_ops)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
#[doc = concat!("let four = ", stringify!($Ty), "::new(4)?;")]
#[doc = concat!("let max = ", stringify!($Ty), "::new(",
stringify!($Int), "::MAX)?;")]
///
/// assert_eq!(Some(four), two.checked_mul(two));
/// assert_eq!(None, max.checked_mul(two));
/// # Some(())
/// # }
/// ```
#[unstable(feature = "nonzero_ops", issue = "84186")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn checked_mul(self, other: $Ty) -> Option<$Ty> {
if let Some(result) = self.get().checked_mul(other.get()) {
// SAFETY: checked_mul returns None on overflow
// and `other` is also non-zero
// so the result cannot be zero.
Some(unsafe { $Ty::new_unchecked(result) })
} else {
None
}
}
/// Multiply two non-zero integers together.
#[doc = concat!("Return [`", stringify!($Int), "::MAX`] on overflow.")]
///
/// # Examples
///
/// ```
/// #![feature(nonzero_ops)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
#[doc = concat!("let four = ", stringify!($Ty), "::new(4)?;")]
#[doc = concat!("let max = ", stringify!($Ty), "::new(",
stringify!($Int), "::MAX)?;")]
///
/// assert_eq!(four, two.saturating_mul(two));
/// assert_eq!(max, four.saturating_mul(max));
/// # Some(())
/// # }
/// ```
#[unstable(feature = "nonzero_ops", issue = "84186")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn saturating_mul(self, other: $Ty) -> $Ty {
// SAFETY: saturating_mul saturates at the numeric bounds on overflow,
// which are never zero, and `other` is also non-zero,
// so the result cannot be zero.
unsafe { $Ty::new_unchecked(self.get().saturating_mul(other.get())) }
}
/// Multiply two non-zero integers together,
/// assuming overflow cannot occur.
/// Overflow is unchecked, and it is undefined behaviour to overflow
/// *even if the result would wrap to a non-zero value*.
/// The behaviour is undefined as soon as
#[doc = sign_dependent_expr!{
$signedness ?
if signed {
concat!("`self * rhs > ", stringify!($Int), "::MAX`, ",
"or `self * rhs < ", stringify!($Int), "::MIN`.")
}
if unsigned {
concat!("`self * rhs > ", stringify!($Int), "::MAX`.")
}
}]
///
/// # Examples
///
/// ```
/// #![feature(nonzero_ops)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
#[doc = concat!("let four = ", stringify!($Ty), "::new(4)?;")]
///
/// assert_eq!(four, unsafe { two.unchecked_mul(two) });
/// # Some(())
/// # }
/// ```
#[unstable(feature = "nonzero_ops", issue = "84186")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const unsafe fn unchecked_mul(self, other: $Ty) -> $Ty {
// SAFETY: The caller ensures there is no overflow.
unsafe { $Ty::new_unchecked(self.get().unchecked_mul(other.get())) }
}
/// Raise non-zero value to an integer power.
/// Check for overflow and return [`None`] on overflow.
/// As a consequence, the result cannot wrap to zero.
///
/// # Examples
///
/// ```
/// #![feature(nonzero_ops)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let three = ", stringify!($Ty), "::new(3)?;")]
#[doc = concat!("let twenty_seven = ", stringify!($Ty), "::new(27)?;")]
#[doc = concat!("let half_max = ", stringify!($Ty), "::new(",
stringify!($Int), "::MAX / 2)?;")]
///
/// assert_eq!(Some(twenty_seven), three.checked_pow(3));
/// assert_eq!(None, half_max.checked_pow(3));
/// # Some(())
/// # }
/// ```
#[unstable(feature = "nonzero_ops", issue = "84186")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn checked_pow(self, other: u32) -> Option<$Ty> {
if let Some(result) = self.get().checked_pow(other) {
// SAFETY: checked_pow returns None on overflow
// so the result cannot be zero.
Some(unsafe { $Ty::new_unchecked(result) })
} else {
None
}
}
/// Raise non-zero value to an integer power.
#[doc = sign_dependent_expr!{
$signedness ?
if signed {
concat!("Return [`", stringify!($Int), "::MIN`] ",
"or [`", stringify!($Int), "::MAX`] on overflow.")
}
if unsigned {
concat!("Return [`", stringify!($Int), "::MAX`] on overflow.")
}
}]
///
/// # Examples
///
/// ```
/// #![feature(nonzero_ops)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let three = ", stringify!($Ty), "::new(3)?;")]
#[doc = concat!("let twenty_seven = ", stringify!($Ty), "::new(27)?;")]
#[doc = concat!("let max = ", stringify!($Ty), "::new(",
stringify!($Int), "::MAX)?;")]
///
/// assert_eq!(twenty_seven, three.saturating_pow(3));
/// assert_eq!(max, max.saturating_pow(3));
/// # Some(())
/// # }
/// ```
#[unstable(feature = "nonzero_ops", issue = "84186")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn saturating_pow(self, other: u32) -> $Ty {
// SAFETY: saturating_pow saturates at the numeric bounds on overflow,
// which are never zero, so the result cannot be zero.
unsafe { $Ty::new_unchecked(self.get().saturating_pow(other)) }
}
}
)+
}
}
// Use this when the generated code should differ between signed and unsigned types.
macro_rules! sign_dependent_expr {
(signed ? if signed { $signed_case:expr } if unsigned { $unsigned_case:expr } ) => {
$signed_case
};
(unsigned ? if signed { $signed_case:expr } if unsigned { $unsigned_case:expr } ) => {
$unsigned_case
};
}
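// Illustrative expansion (editor's note, not in the original source):
//
//     sign_dependent_expr! {
//         signed ?
//         if signed { "overflow can hit MIN or MAX" }
//         if unsigned { "overflow can only hit MAX" }
//     }
//
// selects the first arm and expands to the `signed` string literal; the doc
// attributes in the macros above rely on exactly this dispatch to emit
// sign-specific overflow wording.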
nonzero_unsigned_signed_operations! {
unsigned NonZeroU8(u8);
unsigned NonZeroU16(u16);
unsigned NonZeroU32(u32);
unsigned NonZeroU64(u64);
unsigned NonZeroU128(u128);
unsigned NonZeroUsize(usize);
signed NonZeroI8(i8);
signed NonZeroI16(i16);
signed NonZeroI32(i32);
signed NonZeroI64(i64);
signed NonZeroI128(i128);
signed NonZeroIsize(isize);
}
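// A minimal usage sketch (editor's addition, not in the original source;
// assumes a nightly toolchain with the unstable `nonzero_ops` feature):
//
//     #![feature(nonzero_ops)]
//     use std::num::NonZeroU8;
//
//     fn demo() -> Option<()> {
//         let two = NonZeroU8::new(2)?;
//         assert_eq!(two.checked_mul(two)?.get(), 4);
//         assert_eq!(two.checked_pow(7)?.get(), 128);
//         assert_eq!(two.saturating_pow(9).get(), u8::MAX); // 512 saturates to 255
//         Some(())
//     }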
macro_rules! nonzero_unsigned_is_power_of_two {
( $( $Ty: ident )+ ) => {
$(
impl $Ty {
/// Returns `true` if and only if `self == (1 << k)` for some `k`.
///
/// On many architectures, this function can perform better than `is_power_of_two()`
/// on the underlying integer type, as special handling of zero can be avoided.
///
/// # Examples
///
/// Basic usage:
///
/// ```
#[doc = concat!("let eight = std::num::", stringify!($Ty), "::new(8).unwrap();")]
/// assert!(eight.is_power_of_two());
#[doc = concat!("let ten = std::num::", stringify!($Ty), "::new(10).unwrap();")]
/// assert!(!ten.is_power_of_two());
/// ```
#[must_use]
#[stable(feature = "nonzero_is_power_of_two", since = "1.59.0")]
#[inline]
pub const fn is_power_of_two(self) -> bool {
// LLVM 11 normalizes `unchecked_sub(x, 1) & x == 0` to the implementation seen here.
// On the basic x86-64 target, this saves 3 instructions for the zero check.
// On x86_64 with BMI1, being nonzero lets it codegen to `BLSR`, which saves an instruction
// compared to the `POPCNT` implementation on the underlying integer type.
intrinsics::ctpop(self.get()) < 2
}
}
)+
}
}
nonzero_unsigned_is_power_of_two! { NonZeroU8 NonZeroU16 NonZeroU32 NonZeroU64 NonZeroU128 NonZeroUsize }
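// Editor's note (not in the original source): for a value known to be
// non-zero, `ctpop(x) < 2` holds exactly when a single bit is set, i.e. when
// x is a power of two. Zero would also satisfy `ctpop(0) == 0 < 2`, so it is
// the NonZero invariant, not the comparison, that makes skipping the zero
// check sound.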
| 45.427205 | 175 | 0.430557 |
39814f0cf84b6d9ee87b9281ade7aee1f733e5e9 | 3,659 | use futures::{future, TryFutureExt};
use linkerd_app_core::{errors::IdentityRequired, identity, svc, tls, Conditional, Error};
use std::task::{Context, Poll};
use tracing::{debug, trace};
const HEADER_NAME: &str = "l5d-require-id";
#[derive(Clone, Debug)]
pub(super) struct NewRequireIdentity<N> {
inner: N,
}
#[derive(Clone, Debug)]
pub(super) struct RequireIdentity<N> {
tls: tls::ConditionalClientTls,
inner: N,
}
// === impl NewRequireIdentity ===
impl<N> NewRequireIdentity<N> {
fn new(inner: N) -> Self {
Self { inner }
}
pub fn layer() -> impl svc::layer::Layer<N, Service = Self> + Clone + Copy {
svc::layer::mk(Self::new)
}
}
impl<T, N> svc::NewService<T> for NewRequireIdentity<N>
where
T: svc::Param<tls::ConditionalClientTls>,
N: svc::NewService<T>,
{
type Service = RequireIdentity<N::Service>;
fn new_service(&mut self, target: T) -> Self::Service {
let tls = target.param();
let inner = self.inner.new_service(target);
RequireIdentity { tls, inner }
}
}
// === impl RequireIdentity ===
type ResponseFuture<F, T, E> =
future::Either<future::Ready<Result<T, Error>>, future::MapErr<F, fn(E) -> Error>>;
impl<S> RequireIdentity<S> {
#[inline]
fn extract_id<B>(req: &mut http::Request<B>) -> Option<identity::Name> {
let v = req.headers_mut().remove(HEADER_NAME)?;
v.to_str().ok()?.parse().ok()
}
}
impl<S, B> svc::Service<http::Request<B>> for RequireIdentity<S>
where
S: svc::Service<http::Request<B>>,
S::Error: Into<Error>,
{
type Response = S::Response;
type Error = Error;
type Future = ResponseFuture<S::Future, S::Response, S::Error>;
#[inline]
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.inner.poll_ready(cx).map_err(Into::into)
}
fn call(&mut self, mut request: http::Request<B>) -> Self::Future {
// If the `l5d-require-id` header is present, then we should expect the target's
// `peer_identity` to match; if the two values do not match or there is no `peer_identity`,
// then we fail the request.
//
// In either case, we clear the header so it is not passed on outbound requests.
if let Some(require_id) = Self::extract_id(&mut request) {
match self.tls.as_ref() {
Conditional::Some(tls::ClientTls { server_id, .. }) => {
if require_id != *server_id.as_ref() {
debug!(
required = %require_id,
found = %server_id,
"Identity required by header not satisfied"
);
let e = IdentityRequired {
required: require_id.into(),
found: Some(server_id.clone()),
};
return future::Either::Left(future::err(e.into()));
} else {
trace!(required = %require_id, "Identity required by header");
}
}
Conditional::None(_) => {
debug!(required = %require_id, "Identity required by header not satisfied");
let e = IdentityRequired {
required: require_id.into(),
found: None,
};
return future::Either::Left(future::err(e.into()));
}
}
}
future::Either::Right(self.inner.call(request).map_err(Into::into))
}
}
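// Editor's sketch of the observable behavior (not part of the original file):
// a request carrying `l5d-require-id: foo.example.com` is forwarded only when
// the target's TLS `ServerId` equals that name; on a mismatch, or when the
// connection has no client TLS at all, the call short-circuits with an
// `IdentityRequired` error. In every case `extract_id` strips the header
// before the inner service sees the request.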
| 33.263636 | 99 | 0.541951 |
dbba2075071bb944790228a78aafc2acd65f2099 | 396 | pub mod acl;
pub mod client;
pub mod cluster;
pub mod config;
pub mod geo;
pub mod hashes;
pub mod hyperloglog;
pub mod keys;
pub mod lists;
pub mod lua;
pub mod memory;
pub mod metrics;
pub mod pubsub;
pub mod scan;
pub mod server;
pub mod sets;
pub mod slowlog;
pub mod sorted_sets;
pub mod streams;
pub mod strings;
pub mod transactions;
#[cfg(feature = "sentinel-client")]
pub mod sentinel;
| 15.84 | 35 | 0.744949 |
d9794ac6a76e3590ade0a32daa884de72c72ecda | 4,184 | use super::*;
use crate::vulkan::{RafxDeviceContextVulkan, RafxRenderpassVulkan};
use crate::*;
use fnv::FnvHasher;
use std::hash::{Hash, Hasher};
pub(crate) struct RafxFramebufferVulkanCache {
cache: LruCache<RafxFramebufferVulkan>,
}
impl RafxFramebufferVulkanCache {
pub(crate) fn new(max_count: usize) -> Self {
RafxFramebufferVulkanCache {
cache: LruCache::new(max_count),
}
}
pub(crate) fn clear(&mut self) {
self.cache.clear();
}
pub(crate) fn framebuffer_hash(
color_targets: &[RafxColorRenderTargetBinding],
depth_target: Option<&RafxDepthStencilRenderTargetBinding>,
) -> u64 {
let mut hasher = FnvHasher::default();
for color_target in color_targets {
color_target
.texture
.vk_texture()
.unwrap()
.texture_id()
.hash(&mut hasher);
color_target.mip_slice.hash(&mut hasher);
color_target.array_slice.hash(&mut hasher);
if let Some(resolve_target) = color_target.resolve_target {
resolve_target
.vk_texture()
.unwrap()
.texture_id()
.hash(&mut hasher);
color_target.resolve_mip_slice.hash(&mut hasher);
color_target.resolve_array_slice.hash(&mut hasher);
}
}
if let Some(depth_target) = &depth_target {
depth_target
.texture
.vk_texture()
.unwrap()
.texture_id()
.hash(&mut hasher);
depth_target.mip_slice.hash(&mut hasher);
depth_target.array_slice.hash(&mut hasher);
}
hasher.finish()
}
pub(crate) fn create_framebuffer(
device_context: &RafxDeviceContextVulkan,
renderpass: &RafxRenderpassVulkan,
color_targets: &[RafxColorRenderTargetBinding],
depth_target: Option<&RafxDepthStencilRenderTargetBinding>,
) -> RafxResult<RafxFramebufferVulkan> {
let mut color_attachments = Vec::with_capacity(color_targets.len());
let mut resolve_attachments = Vec::with_capacity(color_targets.len());
for color_target in color_targets {
color_attachments.push(RafxFramebufferVulkanAttachment {
texture: color_target.texture.vk_texture().unwrap().clone(),
array_slice: color_target.array_slice,
mip_slice: color_target.mip_slice,
});
if let Some(resolve_target) = color_target.resolve_target {
resolve_attachments.push(RafxFramebufferVulkanAttachment {
texture: resolve_target.vk_texture().unwrap().clone(),
array_slice: color_target.resolve_array_slice,
mip_slice: color_target.resolve_mip_slice,
})
}
}
RafxFramebufferVulkan::new(
device_context,
&RafxFramebufferVulkanDef {
renderpass: renderpass.clone(),
color_attachments,
resolve_attachments,
depth_stencil_attachment: depth_target.as_ref().map(|x| {
RafxFramebufferVulkanAttachment {
texture: x.texture.vk_texture().unwrap().clone(),
array_slice: x.array_slice,
mip_slice: x.mip_slice,
}
}),
},
)
}
pub(crate) fn get_or_create_framebuffer(
&mut self,
device_context: &RafxDeviceContextVulkan,
renderpass: &RafxRenderpassVulkan,
color_targets: &[RafxColorRenderTargetBinding],
depth_target: Option<&RafxDepthStencilRenderTargetBinding>,
) -> RafxResult<RafxFramebufferVulkan> {
//
// Hash it
//
let hash = Self::framebuffer_hash(color_targets, depth_target);
self.cache.get_or_create(hash, || {
Self::create_framebuffer(device_context, renderpass, color_targets, depth_target)
})
}
}
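// Editor's note (not in the original source): the LRU is keyed solely by the
// 64-bit FNV hash of the attachment set (texture ids plus mip/array slices),
// so two distinct binding sets would share a framebuffer only on a hash
// collision; the cache accepts that as vanishingly unlikely rather than
// storing and comparing full keys.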
| 34.866667 | 93 | 0.577677 |
62b6925571a36c5f89c5cb9e5fbaf86a90c6be74 | 1,455 | //! The module implements a Version structure.
//! A version is divided into the following components:
//! - Major: Version including breaking changes
//! - Minor: Version including new features
//! - Patch: Version including bug fixes and optimizations
use core::cmp::Ordering;
use core::fmt::Display;
use core::fmt::Error;
use core::fmt::Formatter;
/// Structure representing a version.
#[derive(Clone, Debug, Eq)]
pub struct Version {
/// The major version
pub major: u16,
/// The minor version
pub minor: u16,
/// The patch version
pub patch: u16,
}
impl Version {
/// Creates a new instance.
pub fn new(major: u16, minor: u16, patch: u16) -> Self {
Self {
major,
minor,
patch,
}
}
// TODO to_string
}
impl Ord for Version {
fn cmp(&self, other: &Self) -> Ordering {
let mut ord = self.major.cmp(&other.major);
if ord != Ordering::Equal {
return ord;
}
ord = self.minor.cmp(&other.minor);
if ord != Ordering::Equal {
return ord;
}
self.patch.cmp(&other.patch)
}
}
impl PartialOrd for Version {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Version {
fn eq(&self, other: &Self) -> bool {
self.major == other.major && self.minor == other.minor && self.patch == other.patch
}
}
impl Display for Version {
fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> {
write!(fmt, "{}.{}.{}", self.major, self.minor, self.patch)
}
}
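// Minimal usage sketch (editor's addition; the `format!` line assumes an
// environment where `alloc` is available, since this module only uses `core`):
//
//     let a = Version::new(1, 2, 3);
//     let b = Version::new(1, 10, 0);
//     assert!(a < b); // major compared first, then minor, then patch
//     assert_eq!(format!("{}", a), "1.2.3"); // via the Display impl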
| 21.086957 | 85 | 0.650859 |
563104494d616a3ff863e5b04ae77f20a10061c7 | 919 | // Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
#![warn(missing_docs)]
//! Driver for timely/differential dataflow.
pub mod boundary;
#[cfg(feature = "server")]
pub(crate) mod decode;
#[cfg(feature = "server")]
pub(crate) mod render;
#[cfg(feature = "server")]
pub(crate) mod server;
#[cfg(feature = "server")]
pub mod source;
#[cfg(feature = "server")]
pub mod storage_state;
pub use boundary::{tcp_boundary, ComputeReplay, DummyBoundary, StorageCapture};
#[cfg(feature = "server")]
pub use decode::metrics::DecodeMetrics;
#[cfg(feature = "server")]
pub use server::{serve_boundary_requests, Config, Server};
| 29.645161 | 79 | 0.729053 |
d9df17ce3441b2bb49577cf3f8e132f71d5751bb | 2,266 | use {
std::{
ops::Deref,
marker::PhantomData,
fmt::Debug,
},
crate::TrackKey,
};
#[derive(Debug, Clone)]
pub enum TrackNode<Key, Data, NA>
where
Key: TrackKey,
Data: Debug + Default + Clone,
NA: Debug + Deref<Target=Data> + Clone
{
Aligned(Data),
NotAligned(NotAlignedNode<Key, Data, NA>)
}
impl<Key, Data, NA> Default for TrackNode<Key, Data, NA>
where
Key: TrackKey,
Data: Debug + Default + Clone,
NA: Debug + Deref<Target=Data> + Clone
{
fn default() -> Self {
Self::Aligned(Data::default())
}
}
impl<Key, Data, NA> From<Data> for TrackNode<Key, Data, NA>
where
Key: TrackKey,
Data: Debug + Default + Clone,
NA: Debug + Deref<Target=Data> + Clone
{
fn from(node: Data) -> Self {
Self::Aligned(node)
}
}
impl<Key, Data, NA> From<NotAlignedNode<Key, Data, NA>> for TrackNode<Key, Data, NA>
where
Key: TrackKey,
Data: Debug + Default + Clone,
NA: Debug + Deref<Target=Data> + Clone
{
fn from(node: NotAlignedNode<Key, Data, NA>) -> Self {
Self::NotAligned(node)
}
}
impl<Key, Data, NA> Deref for TrackNode<Key, Data, NA>
where
Key: TrackKey,
Data: Debug + Default + Clone,
NA: Debug + Deref<Target=Data> + Clone
{
type Target = Data;
fn deref(&self) -> &Self::Target {
match self {
Self::Aligned(aligned) => aligned,
Self::NotAligned(not_aligned) => not_aligned.deref(),
}
}
}
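// Editor's note (not in the original source): thanks to this impl, a
// `TrackNode` dereferences straight to `Data` regardless of variant; the
// `NotAligned` arm goes through the inner `NA: Deref<Target = Data>` wrapper
// via deref coercion, so callers can read `Data` fields without matching on
// alignment.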
#[derive(Debug, Clone)]
pub struct NotAlignedNode<Key, C, NA>
where
Key: TrackKey,
C: Debug + Clone,
NA: Debug + Clone,
{
pub(crate) node: NA,
pub(crate) key: Key,
pub(crate) canceled_node: C,
pub(crate) canceled_key: Key,
pub(crate) phantom: PhantomData<C>
}
impl<Key, C, NA> NotAlignedNode<Key, C, NA>
where
Key: TrackKey,
C: Debug + Clone,
NA: Debug + Clone,
{
pub fn canceled_node(&self) -> &C {
&self.canceled_node
}
pub fn canceled_key(&self) -> &Key {
&self.canceled_key
}
}
impl<Key, C, NA> Deref for NotAlignedNode<Key, C, NA>
where
Key: TrackKey,
C: Debug + Clone,
NA: Debug + Clone,
{
type Target = NA;
fn deref(&self) -> &Self::Target {
&self.node
}
}
| 20.414414 | 84 | 0.584289 |
1ecf012e04be4cb1a1695b6e9b5b953cc0e2a66a | 473 | // FIXME https://github.com/rust-lang/rust/issues/59774
// normalize-stderr-test "thread.*panicked.*Metadata module not compiled.*\n" -> ""
// normalize-stderr-test "note:.*RUST_BACKTRACE=1.*\n" -> ""
fn generic<T: Copy>(t: T) {
let s: [T; 1518600000] = [t; 1518600000];
//~^ ERROR the type `[[u8; 1518599999]; 1518600000]` is too big for the current architecture
}
fn main() {
let x: [u8; 1518599999] = [0; 1518599999];
generic::<[u8; 1518599999]>(x);
}
| 33.785714 | 96 | 0.638478 |
14839509068f03a46b98d155418b180c7116b309 | 27,883 | // Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
#[cfg(test)]
mod peer_test;
use super::bad_block_cache::BadBlockCache;
use super::bucket::{SyncBucket, SyncBucketSet};
use super::sync_state::SyncState;
use super::sync_worker::SyncWorker;
use super::{network_context::SyncNetworkContext, Error};
use crate::network_context::HelloResponseFuture;
use amt::Amt;
use async_std::channel::{bounded, Receiver, Sender};
use async_std::sync::{Mutex, RwLock};
use async_std::task::{self, JoinHandle};
use beacon::{Beacon, BeaconSchedule};
use blocks::{Block, FullTipset, GossipBlock, Tipset, TipsetKeys, TxMeta};
use chain::ChainStore;
use cid::{Cid, Code::Blake2b256};
use clock::ChainEpoch;
use encoding::{Cbor, Error as EncodingError};
use fil_types::verifier::ProofVerifier;
use forest_libp2p::{hello::HelloRequest, rpc::RequestResponseError, NetworkEvent, NetworkMessage};
use futures::stream::StreamExt;
use futures::{future::try_join_all, try_join};
use futures::{select, stream::FuturesUnordered};
use ipld_blockstore::BlockStore;
use libp2p::core::PeerId;
use log::{debug, error, info, trace, warn};
use message::{SignedMessage, UnsignedMessage};
use message_pool::{MessagePool, Provider};
use networks::BLOCK_DELAY_SECS;
use serde::Deserialize;
use state_manager::StateManager;
use std::sync::Arc;
use std::{
marker::PhantomData,
time::{SystemTime, UNIX_EPOCH},
};
const MAX_HEIGHT_DRIFT: u64 = 5;
// TODO revisit this type, necessary for two sets of Arc<Mutex<>> because each state is
// on a separate thread and needs to be mutated independently, but the vec needs to be read
// on the RPC API thread and mutated on this thread.
type WorkerState = Arc<RwLock<Vec<Arc<RwLock<SyncState>>>>>;
#[derive(Debug, PartialEq)]
pub enum ChainSyncState {
/// Bootstrapping peers before starting sync.
Bootstrap,
/// Syncing chain with ChainExchange protocol.
Initial,
/// Following chain with blocks received over gossipsub.
Follow,
}
/// Struct that defines syncing configuration options
#[derive(Debug, Deserialize, Clone)]
pub struct SyncConfig {
/// Request window length for tipsets during chain exchange
pub req_window: i64,
/// Number of tasks spawned for sync workers
pub worker_tasks: usize,
}
impl SyncConfig {
pub fn new(req_window: i64, worker_tasks: usize) -> Self {
Self {
req_window,
worker_tasks,
}
}
}
impl Default for SyncConfig {
// TODO benchmark (1 is temporary value to avoid overlap)
fn default() -> Self {
Self {
req_window: 200,
worker_tasks: 1,
}
}
}
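// Editor's sketch (values are illustrative, not recommendations):
//
//     let cfg = SyncConfig::new(200, 4); // 200-tipset request windows, 4 sync workers
//
// `req_window` bounds each chain-exchange request, while `worker_tasks`
// controls how many `SyncWorker`s `ChainSyncer::start` spawns.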
/// Struct that handles the ChainSync logic. This handles incoming network events such as
/// gossipsub messages, Hello protocol requests, as well as sending and receiving ChainExchange
/// messages to be able to do the initial sync.
pub struct ChainSyncer<DB, TBeacon, V, M> {
/// State of general `ChainSync` protocol.
state: Arc<Mutex<ChainSyncState>>,
/// Syncing state of chain sync workers.
worker_state: WorkerState,
/// Drand randomness beacon
beacon: Arc<BeaconSchedule<TBeacon>>,
/// manages retrieving and updates state objects
state_manager: Arc<StateManager<DB>>,
/// Bucket queue for incoming tipsets
sync_queue: SyncBucketSet,
/// Represents tipsets related to ones already being synced to avoid duplicate work.
active_sync_tipsets: SyncBucketSet,
/// Represents next tipset to be synced.
next_sync_target: Option<SyncBucket>,
/// Context to be able to send requests to p2p network
network: SyncNetworkContext<DB>,
/// the known genesis tipset
genesis: Arc<Tipset>,
/// Bad blocks cache, updates based on invalid state transitions.
/// Will mark any invalid blocks and all children as bad in this bounded cache
bad_blocks: Arc<BadBlockCache>,
/// incoming network events to be handled by syncer
net_handler: Receiver<NetworkEvent>,
/// Proof verification implementation.
verifier: PhantomData<V>,
mpool: Arc<MessagePool<M>>,
/// Syncing configurations
sync_config: SyncConfig,
}
impl<DB, TBeacon, V, M> ChainSyncer<DB, TBeacon, V, M>
where
TBeacon: Beacon + Sync + Send + 'static,
DB: BlockStore + Sync + Send + 'static,
V: ProofVerifier + Sync + Send + 'static,
M: Provider + Sync + Send + 'static,
{
pub fn new(
state_manager: Arc<StateManager<DB>>,
beacon: Arc<BeaconSchedule<TBeacon>>,
mpool: Arc<MessagePool<M>>,
network_send: Sender<NetworkMessage>,
network_rx: Receiver<NetworkEvent>,
genesis: Arc<Tipset>,
cfg: SyncConfig,
) -> Result<Self, Error> {
let network = SyncNetworkContext::new(
network_send,
Default::default(),
state_manager.blockstore_cloned(),
);
Ok(Self {
state: Arc::new(Mutex::new(ChainSyncState::Bootstrap)),
worker_state: Default::default(),
beacon,
network,
genesis,
state_manager,
bad_blocks: Arc::new(BadBlockCache::default()),
net_handler: network_rx,
sync_queue: SyncBucketSet::default(),
active_sync_tipsets: Default::default(),
next_sync_target: None,
verifier: Default::default(),
mpool,
sync_config: cfg,
})
}
/// Returns a clone of the bad blocks cache to be used outside of chain sync.
pub fn bad_blocks_cloned(&self) -> Arc<BadBlockCache> {
self.bad_blocks.clone()
}
/// Returns a cloned `Arc` of the sync worker state.
pub fn sync_state_cloned(&self) -> WorkerState {
self.worker_state.clone()
}
async fn handle_network_event(
&mut self,
network_event: NetworkEvent,
new_ts_tx: &Sender<(PeerId, FullTipset)>,
hello_futures: &FuturesUnordered<HelloResponseFuture>,
) {
match network_event {
NetworkEvent::HelloRequest { request, source } => {
debug!(
"Message inbound, heaviest tipset cid: {:?}",
request.heaviest_tip_set
);
let new_ts_tx_cloned = new_ts_tx.clone();
let cs_cloned = self.state_manager.chain_store().clone();
let net_cloned = self.network.clone();
// TODO determine if tasks started to fetch and load tipsets should be
// limited. Currently no cap on this.
task::spawn(async move {
Self::fetch_and_inform_tipset(
cs_cloned,
net_cloned,
source,
TipsetKeys::new(request.heaviest_tip_set),
new_ts_tx_cloned,
)
.await;
});
}
NetworkEvent::PeerConnected(peer_id) => {
let heaviest = self
.state_manager
.chain_store()
.heaviest_tipset()
.await
.unwrap();
if self.network.peer_manager().is_peer_new(&peer_id).await {
match self
.network
.hello_request(
peer_id,
HelloRequest {
heaviest_tip_set: heaviest.cids().to_vec(),
heaviest_tipset_height: heaviest.epoch(),
heaviest_tipset_weight: heaviest.weight().clone(),
genesis_hash: *self.genesis.blocks()[0].cid(),
},
)
.await
{
Ok(hello_fut) => {
hello_futures.push(hello_fut);
}
Err(e) => {
error!("{}", e);
}
}
}
}
NetworkEvent::PeerDisconnected(peer_id) => {
self.network.peer_manager().remove_peer(&peer_id).await;
}
NetworkEvent::PubsubMessage { source, message } => {
if *self.state.lock().await != ChainSyncState::Follow {
// Ignore gossipsub events if not in following state
return;
}
match message {
forest_libp2p::PubsubMessage::Block(b) => {
let network = self.network.clone();
let channel = new_ts_tx.clone();
task::spawn(async move {
Self::handle_gossip_block(b, source, network, &channel).await;
});
}
forest_libp2p::PubsubMessage::Message(m) => {
// add message to message pool
// TODO handle adding message to mempool in separate task.
if let Err(e) = self.mpool.add(m).await {
trace!("Gossip Message failed to be added to Message pool: {}", e);
}
}
}
}
// All other network events are being ignored currently
_ => {}
}
}
async fn handle_gossip_block(
block: GossipBlock,
source: PeerId,
network: SyncNetworkContext<DB>,
channel: &Sender<(PeerId, FullTipset)>,
) {
info!(
"Received block over GossipSub: {} height {} from {}",
block.header.cid(),
block.header.epoch(),
source
);
// Get bls_messages in the store or over Bitswap
let bls_messages: Vec<_> = block
.bls_messages
.into_iter()
.map(|m| network.bitswap_get::<UnsignedMessage>(m))
.collect();
// Get secp_messages in the store or over Bitswap
let secp_messages: Vec<_> = block
.secpk_messages
.into_iter()
.map(|m| network.bitswap_get::<SignedMessage>(m))
.collect();
let (bls_messages, secp_messages) =
match try_join!(try_join_all(bls_messages), try_join_all(secp_messages)) {
Ok(msgs) => msgs,
Err(e) => {
warn!("Failed to get message: {}", e);
return;
}
};
// Form block
let block = Block {
header: block.header,
bls_messages,
secp_messages,
};
let ts = FullTipset::new(vec![block]).unwrap();
if channel.send((source, ts)).await.is_err() {
error!("Failed to update peer list, receiver dropped");
}
}
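// Editor's note (not in the original source): the message lookups above run
// concurrently via `try_join!`/`try_join_all`; each `bitswap_get` is
// presumably able to satisfy the request from the local store before going
// to the network, and failure to resolve any single message drops the whole
// gossiped block with a warning.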
/// Spawns a network handler and begins the syncing process.
pub async fn start(mut self, worker_tx: Sender<Arc<Tipset>>, worker_rx: Receiver<Arc<Tipset>>) {
for i in 0..self.sync_config.worker_tasks {
self.spawn_worker(worker_rx.clone(), i).await;
}
// Channels to handle fetching hello tipsets in a separate task and returning the tipset.
let (new_ts_tx, new_ts_rx) = bounded(10);
let mut hello_futures = FuturesUnordered::<HelloResponseFuture>::new();
let mut fused_handler = self.net_handler.clone().fuse();
let mut fused_inform_channel = new_ts_rx.fuse();
loop {
// TODO would be ideal if this were a future attached to the select
if worker_tx.is_empty() {
if let Some(tar) = self.next_sync_target.take() {
if let Some(ts) = tar.heaviest_tipset() {
self.active_sync_tipsets.insert(ts.clone());
worker_tx
.send(ts)
.await
.expect("Worker receivers should not be dropped");
}
}
}
select! {
network_event = fused_handler.next() => match network_event {
Some(event) => self.handle_network_event(
event,
&new_ts_tx,
&hello_futures).await,
None => break,
},
inform_head_event = fused_inform_channel.next() => match inform_head_event {
Some((peer, new_head)) => {
if let Err(e) = self.inform_new_head(peer, new_head).await {
warn!("failed to inform new head from peer {}: {}", peer, e);
}
}
None => break,
},
hello_event = hello_futures.select_next_some() => match hello_event {
(peer_id, sent, Some(Ok(_res))) => {
let lat = SystemTime::now().duration_since(sent).unwrap_or_default();
self.network.peer_manager().log_success(peer_id, lat).await;
},
(peer_id, sent, Some(Err(e))) => {
match e {
RequestResponseError::ConnectionClosed
| RequestResponseError::DialFailure
| RequestResponseError::UnsupportedProtocols => {
self.network.peer_manager().mark_peer_bad(peer_id).await;
}
// Log failure for timeout on remote node.
RequestResponseError::Timeout => {
let lat = SystemTime::now().duration_since(sent).unwrap_or_default();
self.network.peer_manager().log_failure(peer_id, lat).await;
},
}
}
// This is indication of timeout on receiver, log failure.
(peer_id, sent, None) => {
let lat = SystemTime::now().duration_since(sent).unwrap_or_default();
self.network.peer_manager().log_failure(peer_id, lat).await;
},
},
}
}
}
/// Fetches a tipset from store or network, then passes the tipset back through the channel
/// to inform of the new head.
async fn fetch_and_inform_tipset(
cs: Arc<ChainStore<DB>>,
network: SyncNetworkContext<DB>,
peer_id: PeerId,
tsk: TipsetKeys,
channel: Sender<(PeerId, FullTipset)>,
) {
match Self::fetch_full_tipset(cs.as_ref(), &network, peer_id, &tsk).await {
Ok(fts) => {
channel
.send((peer_id, fts))
.await
.expect("Inform tipset receiver dropped");
}
Err(e) => {
debug!("Failed to fetch full tipset from peer ({}): {}", peer_id, e);
}
}
}
/// Spawns a new sync worker and pushes the state to the `ChainSyncer`
async fn spawn_worker(&mut self, channel: Receiver<Arc<Tipset>>, id: usize) -> JoinHandle<()> {
let state = Arc::new(RwLock::new(SyncState::default()));
// push state to managed states in Syncer.
self.worker_state.write().await.push(state.clone());
SyncWorker {
state,
beacon: self.beacon.clone(),
state_manager: self.state_manager.clone(),
network: self.network.clone(),
genesis: self.genesis.clone(),
bad_blocks: self.bad_blocks.clone(),
verifier: PhantomData::<V>::default(),
req_window: self.sync_config.req_window,
}
.spawn(channel, Arc::clone(&self.state), id)
.await
}
/// Informs the syncer about a new potential tipset.
/// This should be called when connecting to new peers, and additionally
/// when receiving new blocks from the network
pub async fn inform_new_head(&mut self, peer: PeerId, ts: FullTipset) -> Result<(), Error> {
// check if the tipset contains no blocks and if so return an error
if ts.blocks().is_empty() {
return Err(Error::NoBlocks);
}
if self.is_epoch_beyond_curr_max(ts.epoch()) {
error!("Received block with impossibly large height {}", ts.epoch());
return Err(Error::Validation(
"Block has impossibly large height".to_string(),
));
}
for block in ts.blocks() {
if let Some(bad) = self.bad_blocks.peek(block.cid()).await {
warn!("Bad block detected, cid: {}", bad);
return Err(Error::Other("Block marked as bad".to_string()));
}
}
// compare target_weight to heaviest weight stored; ignore otherwise
let candidate_ts = self
.state_manager
.chain_store()
.heaviest_tipset()
.await
// TODO we should be able to queue a tipset with the same weight on a different chain.
// Currently needed to go GT because equal tipsets are attempted to be synced.
.map(|heaviest| ts.weight() >= heaviest.weight())
.unwrap_or(true);
if candidate_ts {
// Check message meta after all other checks (expensive)
for block in ts.blocks() {
self.validate_msg_meta(block)?;
}
self.set_peer_head(peer, Arc::new(ts.into_tipset())).await;
}
Ok(())
}
async fn set_peer_head(&mut self, peer: PeerId, ts: Arc<Tipset>) {
self.network
.peer_manager()
.update_peer_head(peer, Arc::clone(&ts))
.await;
// Only update target on initial sync
if *self.state.lock().await == ChainSyncState::Bootstrap {
if let Some(best_target) = self.select_sync_target().await {
self.schedule_tipset(best_target).await;
*self.state.lock().await = ChainSyncState::Initial;
return;
}
}
self.schedule_tipset(ts).await;
}
/// Selects max sync target from current peer set
async fn select_sync_target(&self) -> Option<Arc<Tipset>> {
// Retrieve all peer heads from peer manager
let heads = self.network.peer_manager().get_peer_heads().await;
heads.iter().max_by_key(|h| h.epoch()).cloned()
}
/// Schedules a new tipset to be handled by the sync manager
async fn schedule_tipset(&mut self, tipset: Arc<Tipset>) {
debug!("Scheduling incoming tipset to sync: {:?}", tipset.cids());
// TODO check if this is already synced.
for act_state in self.worker_state.read().await.iter() {
if let Some(target) = act_state.read().await.target() {
// Already currently syncing this, so just return
if target == &tipset {
return;
}
// The new tipset is the successor block of a block being synced, add it to queue.
// We might need to check if it is still currently syncing or if it is complete...
if tipset.parents() == target.key() {
self.sync_queue.insert(tipset);
if self.next_sync_target.is_none() {
if let Some(target_bucket) = self.sync_queue.pop() {
self.next_sync_target = Some(target_bucket);
}
}
return;
}
}
}
if self.sync_queue.related_to_any(&tipset) {
self.sync_queue.insert(tipset);
if self.next_sync_target.is_none() {
if let Some(target_bucket) = self.sync_queue.pop() {
self.next_sync_target = Some(target_bucket);
}
}
return;
}
// TODO sync the fork?
// Check if the incoming tipset is heavier than the heaviest tipset in the queue.
// If it isn't, return because we don't want to sync that.
let queue_heaviest = self.sync_queue.heaviest();
if let Some(qtip) = queue_heaviest {
if qtip.weight() > tipset.weight() {
return;
}
}
// Heavy enough to be synced. If there is no current thing being synced,
// add it to be synced right away. Otherwise, add it to the queue.
self.sync_queue.insert(tipset);
if self.next_sync_target.is_none() {
if let Some(target_bucket) = self.sync_queue.pop() {
self.next_sync_target = Some(target_bucket);
}
}
}
/// Validates that the message root in the header matches the root generated from the
/// bls and secp messages contained in the passed-in block, and persists those messages to the key-value store
fn validate_msg_meta(&self, block: &Block) -> Result<(), Error> {
let sm_root = compute_msg_meta(
self.state_manager.blockstore(),
block.bls_msgs(),
block.secp_msgs(),
)?;
if block.header().messages() != &sm_root {
return Err(Error::InvalidRoots);
}
chain::persist_objects(self.state_manager.blockstore(), block.bls_msgs())?;
chain::persist_objects(self.state_manager.blockstore(), block.secp_msgs())?;
Ok(())
}
fn is_epoch_beyond_curr_max(&self, epoch: ChainEpoch) -> bool {
let genesis = self.genesis.as_ref();
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
epoch as u64 > ((now - genesis.min_timestamp()) / BLOCK_DELAY_SECS) + MAX_HEIGHT_DRIFT
}
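// Editor's worked example (illustrative numbers, not from the original
// source): with BLOCK_DELAY_SECS = 30 and a genesis minted 3000 seconds ago,
// the ceiling is 3000 / 30 + 5 = 105, so a header claiming epoch 106 or
// higher is rejected before any further validation.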
/// Returns a `FullTipset` from the store if the `TipsetKeys` exist in the key-value store;
/// otherwise requests the `FullTipset` over chain exchange
async fn fetch_full_tipset(
cs: &ChainStore<DB>,
network: &SyncNetworkContext<DB>,
peer_id: PeerId,
tsk: &TipsetKeys,
) -> Result<FullTipset, String> {
let fts = match Self::load_fts(cs, tsk).await {
Ok(fts) => fts,
Err(_) => network.chain_exchange_fts(Some(peer_id), tsk).await?,
};
Ok(fts)
}
/// Returns a reconstructed FullTipset from store if keys exist
async fn load_fts(cs: &ChainStore<DB>, keys: &TipsetKeys) -> Result<FullTipset, Error> {
let mut blocks = Vec::new();
// retrieve tipset from store based on passed in TipsetKeys
let ts = cs.tipset_from_keys(keys).await?;
for header in ts.blocks() {
// retrieve bls and secp messages from specified BlockHeader
let (bls_msgs, secp_msgs) = chain::block_messages(cs.blockstore(), &header)?;
// construct a full block
let full_block = Block {
header: header.clone(),
bls_messages: bls_msgs,
secp_messages: secp_msgs,
};
// push vector of full blocks to build FullTipset
blocks.push(full_block);
}
// construct FullTipset
let fts = FullTipset::new(blocks)?;
Ok(fts)
}
}
/// Returns the message root CID computed from the given bls and secp messages.
pub fn compute_msg_meta<DB: BlockStore>(
blockstore: &DB,
bls_msgs: &[UnsignedMessage],
secp_msgs: &[SignedMessage],
) -> Result<Cid, Error> {
// collect bls and secp cids
let bls_cids = cids_from_messages(bls_msgs)?;
let secp_cids = cids_from_messages(secp_msgs)?;
// generate Amt and batch set message values
let bls_root = Amt::new_from_iter(blockstore, bls_cids)?;
let secp_root = Amt::new_from_iter(blockstore, secp_cids)?;
let meta = TxMeta {
bls_message_root: bls_root,
secp_message_root: secp_root,
};
// store message roots and receive meta_root cid
let meta_root = blockstore
.put(&meta, Blake2b256)
.map_err(|e| Error::Other(e.to_string()))?;
Ok(meta_root)
}
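// Editor's note (not in the original source): the returned CID commits to a
// `TxMeta` holding two AMT roots, one per signature scheme; this is the same
// value that `validate_msg_meta` recomputes and checks against
// `BlockHeader::messages` before persisting a block's messages.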
fn cids_from_messages<T: Cbor>(messages: &[T]) -> Result<Vec<Cid>, EncodingError> {
messages.iter().map(Cbor::cid).collect()
}
#[cfg(test)]
mod tests {
use super::*;
use async_std::channel::{bounded, Sender};
use async_std::task;
use beacon::{BeaconPoint, MockBeacon};
use db::MemoryDB;
use fil_types::verifier::MockVerifier;
use forest_libp2p::NetworkEvent;
use message_pool::{test_provider::TestApi, MessagePool};
use state_manager::StateManager;
use std::convert::TryFrom;
use std::sync::Arc;
use std::time::Duration;
use test_utils::{construct_dummy_header, construct_messages};
fn chain_syncer_setup(
db: Arc<MemoryDB>,
) -> (
ChainSyncer<MemoryDB, MockBeacon, MockVerifier, TestApi>,
Sender<NetworkEvent>,
Receiver<NetworkMessage>,
) {
let chain_store = Arc::new(ChainStore::new(db.clone()));
let test_provider = TestApi::default();
let (tx, _rx) = bounded(10);
let mpool = task::block_on(MessagePool::new(
test_provider,
"test".to_string(),
tx,
Default::default(),
))
.unwrap();
let mpool = Arc::new(mpool);
let (local_sender, test_receiver) = bounded(20);
let (event_sender, event_receiver) = bounded(20);
let gen = construct_dummy_header();
chain_store.set_genesis(&gen).unwrap();
let beacon = Arc::new(BeaconSchedule(vec![BeaconPoint {
height: 0,
beacon: Arc::new(MockBeacon::new(Duration::from_secs(1))),
}]));
let genesis_ts = Arc::new(Tipset::new(vec![gen]).unwrap());
(
ChainSyncer::new(
Arc::new(StateManager::new(chain_store)),
beacon,
mpool,
local_sender,
event_receiver,
genesis_ts,
SyncConfig::default(),
)
.unwrap(),
event_sender,
test_receiver,
)
}
#[test]
fn chainsync_constructor() {
let db = Arc::new(MemoryDB::default());
// Test just makes sure that the chain syncer can be created without using a live database or
// p2p network (local channels to simulate network messages and responses)
let _chain_syncer = chain_syncer_setup(db);
}
#[test]
fn compute_msg_meta_given_msgs_test() {
let db = Arc::new(MemoryDB::default());
let (cs, _, _) = chain_syncer_setup(db);
let (bls, secp) = construct_messages();
let expected_root =
Cid::try_from("bafy2bzaceasssikoiintnok7f3sgnekfifarzobyr3r4f25sgxmn23q4c35ic")
.unwrap();
let root = compute_msg_meta(cs.state_manager.blockstore(), &[bls], &[secp]).unwrap();
assert_eq!(root, expected_root);
}
#[test]
fn empty_msg_meta_vector() {
let blockstore = MemoryDB::default();
let usm: Vec<UnsignedMessage> =
encoding::from_slice(&base64::decode("gA==").unwrap()).unwrap();
let sm: Vec<SignedMessage> =
encoding::from_slice(&base64::decode("gA==").unwrap()).unwrap();
assert_eq!(
compute_msg_meta(&blockstore, &usm, &sm)
.unwrap()
.to_string(),
"bafy2bzacecmda75ovposbdateg7eyhwij65zklgyijgcjwynlklmqazpwlhba"
);
}
}
| 36.931126 | 101 | 0.556145 |
e8f5b03f73d27ddd428464ec67aa4a1948ad2dda | 27,779 | //! Build a dist manifest, hash and sign everything.
//! This gets called by `promote-release`
//! (https://github.com/rust-lang/rust-central-station/tree/master/promote-release)
//! via `x.py dist hash-and-sign`; the cmdline arguments are set up
//! by rustbuild (in `src/bootstrap/dist.rs`).
#![deny(warnings)]
use serde::Serialize;
use toml;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::env;
use std::fs::{self, File};
use std::io::{self, Read, Write};
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
static HOSTS: &[&str] = &[
"aarch64-unknown-linux-gnu",
"arm-unknown-linux-gnueabi",
"arm-unknown-linux-gnueabihf",
"armv7-unknown-linux-gnueabihf",
"i686-apple-darwin",
"i686-pc-windows-gnu",
"i686-pc-windows-msvc",
"i686-unknown-linux-gnu",
"mips-unknown-linux-gnu",
"mips64-unknown-linux-gnuabi64",
"mips64el-unknown-linux-gnuabi64",
"mipsel-unknown-linux-gnu",
"mipsisa32r6-unknown-linux-gnu",
"mipsisa32r6el-unknown-linux-gnu",
"mipsisa64r6-unknown-linux-gnuabi64",
"mipsisa64r6el-unknown-linux-gnuabi64",
"powerpc-unknown-linux-gnu",
"powerpc64-unknown-linux-gnu",
"powerpc64le-unknown-linux-gnu",
"s390x-unknown-linux-gnu",
"x86_64-apple-darwin",
"x86_64-pc-windows-gnu",
"x86_64-pc-windows-msvc",
"x86_64-unknown-freebsd",
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"x86_64-unknown-netbsd",
];
static TARGETS: &[&str] = &[
"aarch64-apple-ios",
"aarch64-fuchsia",
"aarch64-linux-android",
"aarch64-pc-windows-msvc",
"aarch64-unknown-cloudabi",
"aarch64-unknown-hermit",
"aarch64-unknown-linux-gnu",
"aarch64-unknown-linux-musl",
"aarch64-unknown-redox",
"arm-linux-androideabi",
"arm-unknown-linux-gnueabi",
"arm-unknown-linux-gnueabihf",
"arm-unknown-linux-musleabi",
"arm-unknown-linux-musleabihf",
"armv5te-unknown-linux-gnueabi",
"armv5te-unknown-linux-musleabi",
"armv7-apple-ios",
"armv7-linux-androideabi",
"thumbv7neon-linux-androideabi",
"armv7-unknown-linux-gnueabi",
"armv7-unknown-linux-gnueabihf",
"armv7a-none-eabi",
"thumbv7neon-unknown-linux-gnueabihf",
"armv7-unknown-linux-musleabi",
"armv7-unknown-linux-musleabihf",
"armebv7r-none-eabi",
"armebv7r-none-eabihf",
"armv7r-none-eabi",
"armv7r-none-eabihf",
"armv7s-apple-ios",
"asmjs-unknown-emscripten",
"i386-apple-ios",
"i586-pc-windows-msvc",
"i586-unknown-linux-gnu",
"i586-unknown-linux-musl",
"i686-apple-darwin",
"i686-linux-android",
"i686-pc-windows-gnu",
"i686-pc-windows-msvc",
"i686-unknown-freebsd",
"i686-unknown-linux-gnu",
"i686-unknown-linux-musl",
"mips-unknown-linux-gnu",
"mips-unknown-linux-musl",
"mips64-unknown-linux-gnuabi64",
"mips64-unknown-linux-muslabi64",
"mips64el-unknown-linux-gnuabi64",
"mips64el-unknown-linux-muslabi64",
"mipsisa32r6-unknown-linux-gnu",
"mipsisa32r6el-unknown-linux-gnu",
"mipsisa64r6-unknown-linux-gnuabi64",
"mipsisa64r6el-unknown-linux-gnuabi64",
"mipsel-unknown-linux-gnu",
"mipsel-unknown-linux-musl",
"nvptx64-nvidia-cuda",
"powerpc-unknown-linux-gnu",
"powerpc64-unknown-linux-gnu",
"powerpc64le-unknown-linux-gnu",
"riscv32i-unknown-none-elf",
"riscv32imc-unknown-none-elf",
"riscv32imac-unknown-none-elf",
"riscv64imac-unknown-none-elf",
"riscv64gc-unknown-none-elf",
"riscv64gc-unknown-linux-gnu",
"s390x-unknown-linux-gnu",
"sparc64-unknown-linux-gnu",
"sparcv9-sun-solaris",
"thumbv6m-none-eabi",
"thumbv7em-none-eabi",
"thumbv7em-none-eabihf",
"thumbv7m-none-eabi",
"thumbv8m.base-none-eabi",
"thumbv8m.main-none-eabi",
"thumbv8m.main-none-eabihf",
"wasm32-unknown-emscripten",
"wasm32-unknown-unknown",
"wasm32-wasi",
"x86_64-apple-darwin",
"x86_64-apple-ios",
"x86_64-fortanix-unknown-sgx",
"x86_64-fuchsia",
"x86_64-linux-android",
"x86_64-pc-windows-gnu",
"x86_64-pc-windows-msvc",
"x86_64-rumprun-netbsd",
"x86_64-sun-solaris",
"x86_64-pc-solaris",
"x86_64-unknown-cloudabi",
"x86_64-unknown-freebsd",
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-gnux32",
"x86_64-unknown-linux-musl",
"x86_64-unknown-netbsd",
"x86_64-unknown-redox",
"x86_64-unknown-hermit",
"tl45-unknown-unknown",
];
static DOCS_TARGETS: &[&str] = &[
"i686-apple-darwin",
"i686-pc-windows-gnu",
"i686-pc-windows-msvc",
"i686-unknown-linux-gnu",
"x86_64-apple-darwin",
"x86_64-pc-windows-gnu",
"x86_64-pc-windows-msvc",
"x86_64-unknown-linux-gnu",
];
static MINGW: &[&str] = &["i686-pc-windows-gnu", "x86_64-pc-windows-gnu"];
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
struct Manifest {
manifest_version: String,
date: String,
pkg: BTreeMap<String, Package>,
renames: BTreeMap<String, Rename>,
profiles: BTreeMap<String, Vec<String>>,
}
#[derive(Serialize)]
struct Package {
version: String,
git_commit_hash: Option<String>,
target: BTreeMap<String, Target>,
}
#[derive(Serialize)]
struct Rename {
to: String,
}
#[derive(Serialize, Default)]
struct Target {
available: bool,
url: Option<String>,
hash: Option<String>,
xz_url: Option<String>,
xz_hash: Option<String>,
components: Option<Vec<Component>>,
extensions: Option<Vec<Component>>,
}
impl Target {
fn unavailable() -> Self {
Self::default()
}
}
#[derive(Serialize)]
struct Component {
pkg: String,
target: String,
}
impl Component {
fn from_str(pkg: &str, target: &str) -> Self {
Self { pkg: pkg.to_string(), target: target.to_string() }
}
}
macro_rules! t {
($e:expr) => {
match $e {
Ok(e) => e,
Err(e) => panic!("{} failed with {}", stringify!($e), e),
}
};
}
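// Illustrative expansion (editor's note): `t!(fs::read_dir(path))` either
// yields the `Ok` value or panics with "fs::read_dir(path) failed with <err>";
// `stringify!` keeps the failing expression's source text in the panic
// message.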
struct Builder {
rust_release: String,
cargo_release: String,
rls_release: String,
clippy_release: String,
rustfmt_release: String,
llvm_tools_release: String,
lldb_release: String,
miri_release: String,
input: PathBuf,
output: PathBuf,
gpg_passphrase: String,
digests: BTreeMap<String, String>,
s3_address: String,
date: String,
rust_version: Option<String>,
cargo_version: Option<String>,
rls_version: Option<String>,
clippy_version: Option<String>,
rustfmt_version: Option<String>,
llvm_tools_version: Option<String>,
lldb_version: Option<String>,
miri_version: Option<String>,
rust_git_commit_hash: Option<String>,
cargo_git_commit_hash: Option<String>,
rls_git_commit_hash: Option<String>,
clippy_git_commit_hash: Option<String>,
rustfmt_git_commit_hash: Option<String>,
llvm_tools_git_commit_hash: Option<String>,
lldb_git_commit_hash: Option<String>,
miri_git_commit_hash: Option<String>,
should_sign: bool,
}
fn main() {
// Avoid signing packages while manually testing
// Do NOT set this env var in CI
let should_sign = env::var("BUILD_MANIFEST_DISABLE_SIGNING").is_err();
// Safety check to ensure signing is always enabled on CI
// The CI environment variable is set by both Travis and AppVeyor
if !should_sign && env::var("CI").is_ok() {
println!("The 'BUILD_MANIFEST_DISABLE_SIGNING' env var can't be enabled on CI.");
println!("If you're not running this on CI, unset the 'CI' env var.");
panic!();
}
let mut args = env::args().skip(1);
let input = PathBuf::from(args.next().unwrap());
let output = PathBuf::from(args.next().unwrap());
let date = args.next().unwrap();
let rust_release = args.next().unwrap();
let s3_address = args.next().unwrap();
let cargo_release = args.next().unwrap();
let rls_release = args.next().unwrap();
let clippy_release = args.next().unwrap();
let miri_release = args.next().unwrap();
let rustfmt_release = args.next().unwrap();
let llvm_tools_release = args.next().unwrap();
let lldb_release = args.next().unwrap();
// Do not ask for a passphrase while manually testing
let mut passphrase = String::new();
if should_sign {
// `x.py` passes the passphrase via stdin.
t!(io::stdin().read_to_string(&mut passphrase));
}
Builder {
rust_release,
cargo_release,
rls_release,
clippy_release,
rustfmt_release,
llvm_tools_release,
lldb_release,
miri_release,
input,
output,
gpg_passphrase: passphrase,
digests: BTreeMap::new(),
s3_address,
date,
rust_version: None,
cargo_version: None,
rls_version: None,
clippy_version: None,
rustfmt_version: None,
llvm_tools_version: None,
lldb_version: None,
miri_version: None,
rust_git_commit_hash: None,
cargo_git_commit_hash: None,
rls_git_commit_hash: None,
clippy_git_commit_hash: None,
rustfmt_git_commit_hash: None,
llvm_tools_git_commit_hash: None,
lldb_git_commit_hash: None,
miri_git_commit_hash: None,
should_sign,
}
.build();
}
enum PkgType {
RustSrc,
Cargo,
Rls,
Clippy,
Rustfmt,
LlvmTools,
Lldb,
Miri,
Other,
}
impl PkgType {
fn from_component(component: &str) -> Self {
use PkgType::*;
match component {
"rust-src" => RustSrc,
"cargo" => Cargo,
"rls" | "rls-preview" => Rls,
"clippy" | "clippy-preview" => Clippy,
"rustfmt" | "rustfmt-preview" => Rustfmt,
"llvm-tools" | "llvm-tools-preview" => LlvmTools,
"lldb" | "lldb-preview" => Lldb,
"miri" | "miri-preview" => Miri,
_ => Other,
}
}
}
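// Editor's note (not in the original source): the stable and `-preview`
// spellings map to one variant, e.g. `PkgType::from_component("rls")` and
// `PkgType::from_component("rls-preview")` both yield `Rls`, so the filename
// and version lookups below treat the two names identically.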
impl Builder {
fn build(&mut self) {
self.rust_version = self.version("rust", "x86_64-unknown-linux-gnu");
self.cargo_version = self.version("cargo", "x86_64-unknown-linux-gnu");
self.rls_version = self.version("rls", "x86_64-unknown-linux-gnu");
self.clippy_version = self.version("clippy", "x86_64-unknown-linux-gnu");
self.rustfmt_version = self.version("rustfmt", "x86_64-unknown-linux-gnu");
self.llvm_tools_version = self.version("llvm-tools", "x86_64-unknown-linux-gnu");
// lldb is only built for macOS.
self.lldb_version = self.version("lldb", "x86_64-apple-darwin");
self.miri_version = self.version("miri", "x86_64-unknown-linux-gnu");
self.rust_git_commit_hash = self.git_commit_hash("rust", "x86_64-unknown-linux-gnu");
self.cargo_git_commit_hash = self.git_commit_hash("cargo", "x86_64-unknown-linux-gnu");
self.rls_git_commit_hash = self.git_commit_hash("rls", "x86_64-unknown-linux-gnu");
self.clippy_git_commit_hash = self.git_commit_hash("clippy", "x86_64-unknown-linux-gnu");
self.rustfmt_git_commit_hash = self.git_commit_hash("rustfmt", "x86_64-unknown-linux-gnu");
self.llvm_tools_git_commit_hash =
self.git_commit_hash("llvm-tools", "x86_64-unknown-linux-gnu");
self.lldb_git_commit_hash = self.git_commit_hash("lldb", "x86_64-unknown-linux-gnu");
self.miri_git_commit_hash = self.git_commit_hash("miri", "x86_64-unknown-linux-gnu");
self.check_toolstate();
self.digest_and_sign();
let manifest = self.build_manifest();
self.write_channel_files(&self.rust_release, &manifest);
if self.rust_release != "beta" && self.rust_release != "nightly" {
self.write_channel_files("stable", &manifest);
}
}
/// If a tool does not pass its tests, don't ship it.
/// Right now, we do this only for Miri.
fn check_toolstate(&mut self) {
let toolstates: Option<HashMap<String, String>> =
File::open(self.input.join("toolstates-linux.json"))
.ok()
.and_then(|f| serde_json::from_reader(&f).ok());
let toolstates = toolstates.unwrap_or_else(|| {
println!(
"WARNING: `toolstates-linux.json` missing/malformed; \
assuming all tools failed"
);
HashMap::default() // Use empty map if anything went wrong.
});
// Mark some tools as missing based on toolstate.
if toolstates.get("miri").map(|s| &*s as &str) != Some("test-pass") {
println!("Miri tests are not passing, removing component");
self.miri_version = None;
self.miri_git_commit_hash = None;
}
}
/// Hash all files, compute their signatures, and collect the hashes in `self.digests`.
fn digest_and_sign(&mut self) {
for file in t!(self.input.read_dir()).map(|e| t!(e).path()) {
let filename = file.file_name().unwrap().to_str().unwrap();
let digest = self.hash(&file);
self.sign(&file);
assert!(self.digests.insert(filename.to_string(), digest).is_none());
}
}
fn build_manifest(&mut self) -> Manifest {
let mut manifest = Manifest {
manifest_version: "2".to_string(),
date: self.date.to_string(),
pkg: BTreeMap::new(),
renames: BTreeMap::new(),
profiles: BTreeMap::new(),
};
self.add_packages_to(&mut manifest);
self.add_profiles_to(&mut manifest);
self.add_renames_to(&mut manifest);
manifest.pkg.insert("rust".to_string(), self.rust_package(&manifest));
manifest
}
fn add_packages_to(&mut self, manifest: &mut Manifest) {
let mut package = |name, targets| self.package(name, &mut manifest.pkg, targets);
package("rustc", HOSTS);
package("rustc-dev", HOSTS);
package("cargo", HOSTS);
package("rust-mingw", MINGW);
package("rust-std", TARGETS);
package("rust-docs", DOCS_TARGETS);
package("rust-src", &["*"]);
package("rls-preview", HOSTS);
package("clippy-preview", HOSTS);
package("miri-preview", HOSTS);
package("rustfmt-preview", HOSTS);
package("rust-analysis", TARGETS);
package("llvm-tools-preview", TARGETS);
package("lldb-preview", TARGETS);
}
fn add_profiles_to(&mut self, manifest: &mut Manifest) {
let mut profile = |name, pkgs| self.profile(name, &mut manifest.profiles, pkgs);
profile("minimal", &["rustc", "cargo", "rust-std", "rust-mingw"]);
profile(
"default",
&[
"rustc",
"cargo",
"rust-std",
"rust-mingw",
"rust-docs",
"rustfmt-preview",
"clippy-preview",
],
);
profile(
"complete",
&[
"rustc",
"cargo",
"rust-std",
"rust-mingw",
"rust-docs",
"rustfmt-preview",
"clippy-preview",
"rls-preview",
"rust-src",
"llvm-tools-preview",
"lldb-preview",
"rust-analysis",
"miri-preview",
],
);
// The compiler libraries are not stable for end users, and they're also huge, so we only
// ship `rustc-dev` for nightly users, and only in the "complete" profile. It's still possible
// for users to install the additional component manually, if needed.
if self.rust_release == "nightly" {
self.extend_profile("complete", &mut manifest.profiles, &["rustc-dev"]);
}
}
fn add_renames_to(&self, manifest: &mut Manifest) {
let mut rename = |from: &str, to: &str| {
manifest.renames.insert(from.to_owned(), Rename { to: to.to_owned() })
};
rename("rls", "rls-preview");
rename("rustfmt", "rustfmt-preview");
rename("clippy", "clippy-preview");
rename("miri", "miri-preview");
}
fn rust_package(&mut self, manifest: &Manifest) -> Package {
let mut pkg = Package {
version: self
.cached_version("rust")
.as_ref()
.expect("Couldn't find Rust version")
.clone(),
git_commit_hash: self.cached_git_commit_hash("rust").clone(),
target: BTreeMap::new(),
};
for host in HOSTS {
if let Some(target) = self.target_host_combination(host, &manifest) {
pkg.target.insert(host.to_string(), target);
} else {
pkg.target.insert(host.to_string(), Target::unavailable());
continue;
}
}
pkg
}
fn target_host_combination(&mut self, host: &str, manifest: &Manifest) -> Option<Target> {
let filename = self.filename("rust", host);
let digest = self.digests.remove(&filename)?;
let xz_filename = filename.replace(".tar.gz", ".tar.xz");
let xz_digest = self.digests.remove(&xz_filename);
let mut components = Vec::new();
let mut extensions = Vec::new();
let host_component = |pkg| Component::from_str(pkg, host);
// rustc/rust-std/cargo/docs are all required,
// and so is rust-mingw if it's available for the target.
components.extend(vec![
host_component("rustc"),
host_component("rust-std"),
host_component("cargo"),
host_component("rust-docs"),
]);
if host.contains("pc-windows-gnu") {
components.push(host_component("rust-mingw"));
}
// Tools are always present in the manifest,
// but might be marked as unavailable if they weren't built.
extensions.extend(vec![
host_component("clippy-preview"),
host_component("miri-preview"),
host_component("rls-preview"),
host_component("rustfmt-preview"),
host_component("llvm-tools-preview"),
host_component("lldb-preview"),
host_component("rust-analysis"),
]);
extensions.extend(
TARGETS
.iter()
.filter(|&&target| target != host)
.map(|target| Component::from_str("rust-std", target)),
);
extensions.extend(HOSTS.iter().map(|target| Component::from_str("rustc-dev", target)));
extensions.push(Component::from_str("rust-src", "*"));
// If the components/extensions don't actually exist for this
// particular host/target combination then nix it entirely from our
// lists.
let has_component = |c: &Component| {
if c.target == "*" {
return true;
}
let pkg = match manifest.pkg.get(&c.pkg) {
Some(p) => p,
None => return false,
};
pkg.target.get(&c.target).is_some()
};
extensions.retain(&has_component);
components.retain(&has_component);
Some(Target {
available: true,
url: Some(self.url(&filename)),
hash: Some(digest),
xz_url: xz_digest.as_ref().map(|_| self.url(&xz_filename)),
xz_hash: xz_digest,
components: Some(components),
extensions: Some(extensions),
})
}
fn profile(
&mut self,
profile_name: &str,
dst: &mut BTreeMap<String, Vec<String>>,
pkgs: &[&str],
) {
dst.insert(profile_name.to_owned(), pkgs.iter().map(|s| (*s).to_owned()).collect());
}
fn extend_profile(
&mut self,
profile_name: &str,
dst: &mut BTreeMap<String, Vec<String>>,
pkgs: &[&str],
) {
dst.get_mut(profile_name)
.expect("existing profile")
.extend(pkgs.iter().map(|s| (*s).to_owned()));
}
fn package(&mut self, pkgname: &str, dst: &mut BTreeMap<String, Package>, targets: &[&str]) {
let (version, mut is_present) = self
.cached_version(pkgname)
.as_ref()
.cloned()
.map(|version| (version, true))
.unwrap_or_default(); // `is_present` defaults to `false` here.
// Miri is nightly-only; never ship it for other trains.
if pkgname == "miri-preview" && self.rust_release != "nightly" {
is_present = false; // Pretend the component is entirely missing.
}
let targets = targets
.iter()
.map(|name| {
if is_present {
// The component generally exists, but it might still be missing for this target.
let filename = self.filename(pkgname, name);
let digest = match self.digests.remove(&filename) {
Some(digest) => digest,
// This component does not exist for this target -- skip it.
None => return (name.to_string(), Target::unavailable()),
};
let xz_filename = filename.replace(".tar.gz", ".tar.xz");
let xz_digest = self.digests.remove(&xz_filename);
(
name.to_string(),
Target {
available: true,
url: Some(self.url(&filename)),
hash: Some(digest),
xz_url: xz_digest.as_ref().map(|_| self.url(&xz_filename)),
xz_hash: xz_digest,
components: None,
extensions: None,
},
)
} else {
// If the component is not present for this build add it anyway but mark it as
// unavailable -- this way rustup won't allow upgrades without --force
(name.to_string(), Target::unavailable())
}
})
.collect();
dst.insert(
pkgname.to_string(),
Package {
version,
git_commit_hash: self.cached_git_commit_hash(pkgname).clone(),
target: targets,
},
);
}
fn url(&self, filename: &str) -> String {
format!("{}/{}/{}", self.s3_address, self.date, filename)
}
fn filename(&self, component: &str, target: &str) -> String {
use PkgType::*;
match PkgType::from_component(component) {
RustSrc => format!("rust-src-{}.tar.gz", self.rust_release),
Cargo => format!("cargo-{}-{}.tar.gz", self.cargo_release, target),
Rls => format!("rls-{}-{}.tar.gz", self.rls_release, target),
Clippy => format!("clippy-{}-{}.tar.gz", self.clippy_release, target),
Rustfmt => format!("rustfmt-{}-{}.tar.gz", self.rustfmt_release, target),
LlvmTools => format!("llvm-tools-{}-{}.tar.gz", self.llvm_tools_release, target),
Lldb => format!("lldb-{}-{}.tar.gz", self.lldb_release, target),
Miri => format!("miri-{}-{}.tar.gz", self.miri_release, target),
Other => format!("{}-{}-{}.tar.gz", component, self.rust_release, target),
}
}
fn cached_version(&self, component: &str) -> &Option<String> {
use PkgType::*;
match PkgType::from_component(component) {
Cargo => &self.cargo_version,
Rls => &self.rls_version,
Clippy => &self.clippy_version,
Rustfmt => &self.rustfmt_version,
LlvmTools => &self.llvm_tools_version,
Lldb => &self.lldb_version,
Miri => &self.miri_version,
_ => &self.rust_version,
}
}
fn cached_git_commit_hash(&self, component: &str) -> &Option<String> {
use PkgType::*;
match PkgType::from_component(component) {
Cargo => &self.cargo_git_commit_hash,
Rls => &self.rls_git_commit_hash,
Clippy => &self.clippy_git_commit_hash,
Rustfmt => &self.rustfmt_git_commit_hash,
LlvmTools => &self.llvm_tools_git_commit_hash,
Lldb => &self.lldb_git_commit_hash,
Miri => &self.miri_git_commit_hash,
_ => &self.rust_git_commit_hash,
}
}
fn version(&self, component: &str, target: &str) -> Option<String> {
self.untar(component, target, |filename| format!("{}/version", filename))
}
fn git_commit_hash(&self, component: &str, target: &str) -> Option<String> {
self.untar(component, target, |filename| format!("{}/git-commit-hash", filename))
}
fn untar<F>(&self, component: &str, target: &str, dir: F) -> Option<String>
where
F: FnOnce(String) -> String,
{
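        // Extract a single file from the component tarball straight to stdout
        // (`tar xf ... -O`) and return its trimmed contents, or None if the
        // extraction failed (e.g. the file does not exist in the archive).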
let mut cmd = Command::new("tar");
let filename = self.filename(component, target);
cmd.arg("xf")
.arg(self.input.join(&filename))
.arg(dir(filename.replace(".tar.gz", "")))
.arg("-O");
let output = t!(cmd.output());
if output.status.success() {
Some(String::from_utf8_lossy(&output.stdout).trim().to_string())
} else {
None
}
}
fn hash(&self, path: &Path) -> String {
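        // Runs `shasum -a 256` over the file, writes the full tool output to
        // `<filename>.sha256` in the output directory, and returns just the
        // hex digest.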
let sha = t!(Command::new("shasum")
.arg("-a")
.arg("256")
.arg(path.file_name().unwrap())
.current_dir(path.parent().unwrap())
.output());
assert!(sha.status.success());
let filename = path.file_name().unwrap().to_str().unwrap();
let sha256 = self.output.join(format!("{}.sha256", filename));
t!(fs::write(&sha256, &sha.stdout));
let stdout = String::from_utf8_lossy(&sha.stdout);
stdout.split_whitespace().next().unwrap().to_string()
}
fn sign(&self, path: &Path) {
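        // Produces an ASCII-armored detached signature in the output directory
        // as `<filename>.asc`, feeding the passphrase in via stdin
        // (`--passphrase-fd 0`).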
if !self.should_sign {
return;
}
let filename = path.file_name().unwrap().to_str().unwrap();
let asc = self.output.join(format!("{}.asc", filename));
println!("signing: {:?}", path);
let mut cmd = Command::new("gpg");
cmd.arg("--pinentry-mode=loopback")
.arg("--no-tty")
.arg("--yes")
.arg("--batch")
.arg("--passphrase-fd")
.arg("0")
.arg("--personal-digest-preferences")
.arg("SHA512")
.arg("--armor")
.arg("--output")
.arg(&asc)
.arg("--detach-sign")
.arg(path)
.stdin(Stdio::piped());
let mut child = t!(cmd.spawn());
t!(child.stdin.take().unwrap().write_all(self.gpg_passphrase.as_bytes()));
assert!(t!(child.wait()).success());
}
fn write_channel_files(&self, channel_name: &str, manifest: &Manifest) {
self.write(&toml::to_string(&manifest).unwrap(), channel_name, ".toml");
self.write(&manifest.date, channel_name, "-date.txt");
self.write(
manifest.pkg["rust"].git_commit_hash.as_ref().unwrap(),
channel_name,
"-git-commit-hash.txt",
);
}
fn write(&self, contents: &str, channel_name: &str, suffix: &str) {
let dst = self.output.join(format!("channel-rust-{}{}", channel_name, suffix));
t!(fs::write(&dst, contents));
self.hash(&dst);
self.sign(&dst);
}
}
| 34.084663 | 101 | 0.573095 |
2842ec69ddcb29c7f9f7b6f4d4f6a2768587b8c5 | 68568 | //! `matchingrules` module includes all the types needed to deal with V3/V4 spec matchers
use std::{fmt, mem};
use std::cmp::Ordering;
use std::collections::{HashMap, HashSet};
#[cfg(test)] use std::collections::hash_map::DefaultHasher;
use std::fmt::{Display, Formatter};
use std::hash::{Hash, Hasher};
use std::str::FromStr;
#[cfg(test)] use expectest::prelude::*;
use anyhow::{anyhow, Context as _};
use log::*;
use maplit::hashmap;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use crate::{HttpStatus, PactSpecification};
use crate::generators::{Generator, GeneratorCategory, Generators};
use crate::json_utils::{json_to_num, json_to_string};
use crate::path_exp::DocPath;
/// A matching rule (this enum covers the set of all supported matcher types)
#[derive(Serialize, Deserialize, Debug, Clone, Eq)]
pub enum MatchingRule {
/// Matcher using equals
Equality,
/// Match using a regular expression
Regex(String),
/// Match using the type of the value
Type,
/// Match using the type of the value and a minimum length for collections
MinType(usize),
/// Match using the type of the value and a maximum length for collections
MaxType(usize),
/// Match using the type of the value and a minimum and maximum length for collections
MinMaxType(usize, usize),
/// Match the value using a timestamp pattern
Timestamp(String),
/// Match the value using a time pattern
Time(String),
/// Match the value using a date pattern
Date(String),
/// Match if the value includes the given value
Include(String),
/// Match if the value is a number
Number,
/// Match if the value is an integer number
Integer,
/// Match if the value is a decimal number
Decimal,
  /// Match if the value is a null value (this is content specific; for JSON it will match a JSON null)
Null,
/// Match binary data by its content type (magic file check)
ContentType(String),
/// Match array items in any order against a list of variants
ArrayContains(Vec<(usize, MatchingRuleCategory, HashMap<DocPath, Generator>)>),
/// Matcher for values in a map, ignoring the keys
Values,
/// Matches boolean values (booleans and the string values `true` and `false`)
Boolean,
  /// Response status code matcher
StatusCode(HttpStatus)
}
impl MatchingRule {
/// Builds a `MatchingRule` from a `Value` struct
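  ///
  /// Example usage (a minimal sketch; `json!` comes from `serde_json`):
  /// ```ignore
  /// let rule = MatchingRule::from_json(&json!({ "match": "regex", "regex": "\\d+" }))?;
  /// assert_eq!(rule, MatchingRule::Regex("\\d+".to_string()));
  /// ```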
pub fn from_json(value: &Value) -> anyhow::Result<MatchingRule> {
match value {
Value::Object(m) => match m.get("match") {
Some(value) => {
let val = json_to_string(value);
match val.as_str() {
"regex" => match m.get(&val) {
Some(s) => Ok(MatchingRule::Regex(json_to_string(s))),
None => Err(anyhow!("Regex matcher missing 'regex' field")),
},
"equality" => Ok(MatchingRule::Equality),
"include" => match m.get("value") {
Some(s) => Ok(MatchingRule::Include(json_to_string(s))),
None => Err(anyhow!("Include matcher missing 'value' field")),
},
"type" => match (json_to_num(m.get("min").cloned()), json_to_num(m.get("max").cloned())) {
(Some(min), Some(max)) => Ok(MatchingRule::MinMaxType(min, max)),
(Some(min), None) => Ok(MatchingRule::MinType(min)),
(None, Some(max)) => Ok(MatchingRule::MaxType(max)),
_ => Ok(MatchingRule::Type)
},
"number" => Ok(MatchingRule::Number),
"integer" => Ok(MatchingRule::Integer),
"decimal" => Ok(MatchingRule::Decimal),
"real" => Ok(MatchingRule::Decimal),
"boolean" => Ok(MatchingRule::Boolean),
"min" => match json_to_num(m.get(&val).cloned()) {
Some(min) => Ok(MatchingRule::MinType(min)),
None => Err(anyhow!("Min matcher missing 'min' field")),
},
"max" => match json_to_num(m.get(&val).cloned()) {
Some(max) => Ok(MatchingRule::MaxType(max)),
None => Err(anyhow!("Max matcher missing 'max' field")),
},
"timestamp" => match m.get("format").or_else(|| m.get(&val)) {
Some(s) => Ok(MatchingRule::Timestamp(json_to_string(s))),
None => Err(anyhow!("Timestamp matcher missing 'timestamp' or 'format' field")),
},
"date" => match m.get("format").or_else(|| m.get(&val)) {
Some(s) => Ok(MatchingRule::Date(json_to_string(s))),
None => Err(anyhow!("Date matcher missing 'date' or 'format' field")),
},
"time" => match m.get("format").or_else(|| m.get(&val)) {
Some(s) => Ok(MatchingRule::Time(json_to_string(s))),
None => Err(anyhow!("Time matcher missing 'time' or 'format' field")),
},
"null" => Ok(MatchingRule::Null),
"contentType" => match m.get("value") {
Some(s) => Ok(MatchingRule::ContentType(json_to_string(s))),
None => Err(anyhow!("ContentType matcher missing 'value' field")),
},
"arrayContains" => match m.get("variants") {
Some(variants) => match variants {
Value::Array(variants) => {
let mut values = Vec::new();
for variant in variants {
let index = json_to_num(variant.get("index").cloned()).unwrap_or_default();
let mut category = MatchingRuleCategory::empty("body");
if let Some(rules) = variant.get("rules") {
category.add_rules_from_json(rules)
.with_context(||
format!("Unable to parse matching rules: {:?}", rules))?;
} else {
category.add_rule(
DocPath::empty(), MatchingRule::Equality, RuleLogic::And);
}
let generators = if let Some(generators_json) = variant.get("generators") {
let mut g = Generators::default();
let cat = GeneratorCategory::BODY;
if let Value::Object(map) = generators_json {
for (k, v) in map {
if let Value::Object(ref map) = v {
let path = DocPath::new(k)?;
g.parse_generator_from_map(&cat, map, Some(path));
}
}
}
g.categories.get(&cat).cloned().unwrap_or_default()
} else {
HashMap::default()
};
values.push((index, category, generators));
}
Ok(MatchingRule::ArrayContains(values))
}
_ => Err(anyhow!("ArrayContains matcher 'variants' field is not an Array")),
}
None => Err(anyhow!("ArrayContains matcher missing 'variants' field")),
}
"values" => Ok(MatchingRule::Values),
"statusCode" => match m.get("status") {
Some(s) => {
let status = HttpStatus::from_json(s)
.context("Unable to parse status code for StatusCode matcher")?;
Ok(MatchingRule::StatusCode(status))
},
None => Ok(MatchingRule::StatusCode(HttpStatus::Success))
},
_ => Err(anyhow!("StatusCode matcher missing 'status' field")),
}
},
None => if let Some(val) = m.get("regex") {
Ok(MatchingRule::Regex(json_to_string(val)))
} else if let Some(val) = json_to_num(m.get("min").cloned()) {
Ok(MatchingRule::MinType(val))
} else if let Some(val) = json_to_num(m.get("max").cloned()) {
Ok(MatchingRule::MaxType(val))
} else if let Some(val) = m.get("timestamp") {
Ok(MatchingRule::Timestamp(json_to_string(val)))
} else if let Some(val) = m.get("time") {
Ok(MatchingRule::Time(json_to_string(val)))
} else if let Some(val) = m.get("date") {
Ok(MatchingRule::Date(json_to_string(val)))
} else {
Err(anyhow!("Matching rule missing 'match' field and unable to guess its type"))
}
},
_ => Err(anyhow!("Matching rule JSON is not an Object")),
}
}
/// Converts this `MatchingRule` to a `Value` struct
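  ///
  /// Example of the generated JSON form (a sketch):
  /// ```ignore
  /// assert_eq!(MatchingRule::MinType(1).to_json(), json!({ "match": "type", "min": 1 }));
  /// ```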
pub fn to_json(&self) -> Value {
match self {
MatchingRule::Equality => json!({ "match": "equality" }),
MatchingRule::Regex(ref r) => json!({ "match": "regex",
"regex": r.clone() }),
MatchingRule::Type => json!({ "match": "type" }),
MatchingRule::MinType(min) => json!({ "match": "type",
"min": json!(*min as u64) }),
MatchingRule::MaxType(max) => json!({ "match": "type",
"max": json!(*max as u64) }),
MatchingRule::MinMaxType(min, max) => json!({ "match": "type",
"min": json!(*min as u64), "max": json!(*max as u64) }),
MatchingRule::Timestamp(ref t) => json!({ "match": "timestamp",
"timestamp": Value::String(t.clone()) }),
MatchingRule::Time(ref t) => json!({ "match": "time",
"time": Value::String(t.clone()) }),
MatchingRule::Date(ref d) => json!({ "match": "date",
"date": Value::String(d.clone()) }),
MatchingRule::Include(ref s) => json!({ "match": "include",
"value": Value::String(s.clone()) }),
MatchingRule::Number => json!({ "match": "number" }),
MatchingRule::Integer => json!({ "match": "integer" }),
MatchingRule::Decimal => json!({ "match": "decimal" }),
MatchingRule::Boolean => json!({ "match": "boolean" }),
MatchingRule::Null => json!({ "match": "null" }),
MatchingRule::ContentType(ref r) => json!({ "match": "contentType",
"value": Value::String(r.clone()) }),
MatchingRule::ArrayContains(variants) => json!({
"match": "arrayContains",
"variants": variants.iter().map(|(index, rules, generators)| {
let mut json = json!({
"index": index,
"rules": rules.to_v3_json()
});
if !generators.is_empty() {
json["generators"] = Value::Object(generators.iter()
.map(|(k, gen)| {
if let Some(json) = gen.to_json() {
Some((String::from(k), json))
} else {
None
}
})
.filter(|item| item.is_some())
.map(|item| item.unwrap())
.collect())
}
json
}).collect::<Vec<Value>>()
}),
MatchingRule::Values => json!({ "match": "values" }),
MatchingRule::StatusCode(status) => json!({ "match": "statusCode", "status": status.to_json()})
}
}
/// If there are any generators associated with this matching rule
pub fn has_generators(&self) -> bool {
match self {
MatchingRule::ArrayContains(variants) => variants.iter()
.any(|(_, _, generators)| !generators.is_empty()),
_ => false
}
}
/// Return the generators for this rule
pub fn generators(&self) -> Vec<Generator> {
match self {
MatchingRule::ArrayContains(variants) => vec![Generator::ArrayContains(variants.clone())],
_ => vec![]
}
}
/// Returns the type name of this matching rule
pub fn name(&self) -> String {
match self {
MatchingRule::Equality => "equality",
MatchingRule::Regex(_) => "regex",
MatchingRule::Type => "type",
MatchingRule::MinType(_) => "min-type",
MatchingRule::MaxType(_) => "max-type",
MatchingRule::MinMaxType(_, _) => "min-max-type",
MatchingRule::Timestamp(_) => "datetime",
MatchingRule::Time(_) => "time",
MatchingRule::Date(_) => "date",
MatchingRule::Include(_) => "include",
MatchingRule::Number => "number",
MatchingRule::Integer => "integer",
MatchingRule::Decimal => "decimal",
MatchingRule::Null => "null",
MatchingRule::ContentType(_) => "content-type",
MatchingRule::ArrayContains(_) => "array-contains",
MatchingRule::Values => "values",
MatchingRule::Boolean => "boolean",
MatchingRule::StatusCode(_) => "status-code"
}.to_string()
}
  /// Returns the associated values for this matching rule
pub fn values(&self) -> HashMap<&'static str, Value> {
let empty = hashmap!{};
match self {
MatchingRule::Equality => empty,
MatchingRule::Regex(r) => hashmap!{ "regex" => Value::String(r.clone()) },
MatchingRule::Type => empty,
MatchingRule::MinType(min) => hashmap!{ "min" => json!(min) },
MatchingRule::MaxType(max) => hashmap!{ "max" => json!(max) },
MatchingRule::MinMaxType(min, max) => hashmap!{ "min" => json!(min), "max" => json!(max) },
MatchingRule::Timestamp(f) => hashmap!{ "format" => Value::String(f.clone()) },
MatchingRule::Time(f) => hashmap!{ "format" => Value::String(f.clone()) },
MatchingRule::Date(f) => hashmap!{ "format" => Value::String(f.clone()) },
MatchingRule::Include(s) => hashmap!{ "value" => Value::String(s.clone()) },
MatchingRule::Number => empty,
MatchingRule::Integer => empty,
MatchingRule::Decimal => empty,
MatchingRule::Null => empty,
MatchingRule::ContentType(ct) => hashmap!{ "content-type" => Value::String(ct.clone()) },
MatchingRule::ArrayContains(variants) => hashmap! { "variants" =>
variants.iter().map(|(variant, rules, gens)| {
Value::Array(vec![json!(variant), rules.to_v3_json(), Value::Object(gens.iter().map(|(key, gen)| {
(key.to_string(), gen.to_json().unwrap())
}).collect())])
}).collect()
},
MatchingRule::Values => empty,
MatchingRule::Boolean => empty,
MatchingRule::StatusCode(sc) => hashmap!{ "status" => sc.to_json() }
}
}
}
impl Hash for MatchingRule {
fn hash<H: Hasher>(&self, state: &mut H) {
mem::discriminant(self).hash(state);
match self {
MatchingRule::Regex(s) => s.hash(state),
MatchingRule::MinType(min) => min.hash(state),
MatchingRule::MaxType(max) => max.hash(state),
MatchingRule::MinMaxType(min, max) => {
min.hash(state);
max.hash(state);
}
MatchingRule::Timestamp(format) => format.hash(state),
MatchingRule::Time(format) => format.hash(state),
MatchingRule::Date(format) => format.hash(state),
MatchingRule::Include(str) => str.hash(state),
MatchingRule::ContentType(str) => str.hash(state),
MatchingRule::ArrayContains(variants) => {
for (index, rules, generators) in variants {
index.hash(state);
rules.hash(state);
          // Sort the entries so the hash does not depend on HashMap iteration order
          let mut entries: Vec<_> = generators.iter().collect();
          entries.sort_by_key(|(s, _)| String::from(*s));
          for (s, g) in entries {
            s.hash(state);
            g.hash(state);
          }
}
}
_ => ()
}
}
}
impl PartialEq for MatchingRule {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(MatchingRule::Regex(s1), MatchingRule::Regex(s2)) => s1 == s2,
(MatchingRule::MinType(min1), MatchingRule::MinType(min2)) => min1 == min2,
(MatchingRule::MaxType(max1), MatchingRule::MaxType(max2)) => max1 == max2,
(MatchingRule::MinMaxType(min1, max1), MatchingRule::MinMaxType(min2, max2)) => min1 == min2 && max1 == max2,
(MatchingRule::Timestamp(format1), MatchingRule::Timestamp(format2)) => format1 == format2,
(MatchingRule::Time(format1), MatchingRule::Time(format2)) => format1 == format2,
(MatchingRule::Date(format1), MatchingRule::Date(format2)) => format1 == format2,
(MatchingRule::Include(str1), MatchingRule::Include(str2)) => str1 == str2,
(MatchingRule::ContentType(str1), MatchingRule::ContentType(str2)) => str1 == str2,
(MatchingRule::ArrayContains(variants1), MatchingRule::ArrayContains(variants2)) => variants1 == variants2,
_ => mem::discriminant(self) == mem::discriminant(other)
}
}
}
#[cfg(test)]
fn h(rule: &MatchingRule) -> u64 {
let mut hasher = DefaultHasher::new();
rule.hash(&mut hasher);
hasher.finish()
}
#[test]
fn hash_and_partial_eq_for_matching_rule() {
expect!(h(&MatchingRule::Equality)).to(be_equal_to(h(&MatchingRule::Equality)));
expect!(MatchingRule::Equality).to(be_equal_to(MatchingRule::Equality));
expect!(MatchingRule::Equality).to_not(be_equal_to(MatchingRule::Type));
expect!(h(&MatchingRule::Type)).to(be_equal_to(h(&MatchingRule::Type)));
expect!(MatchingRule::Type).to(be_equal_to(MatchingRule::Type));
expect!(h(&MatchingRule::Number)).to(be_equal_to(h(&MatchingRule::Number)));
expect!(MatchingRule::Number).to(be_equal_to(MatchingRule::Number));
expect!(h(&MatchingRule::Integer)).to(be_equal_to(h(&MatchingRule::Integer)));
expect!(MatchingRule::Integer).to(be_equal_to(MatchingRule::Integer));
expect!(h(&MatchingRule::Decimal)).to(be_equal_to(h(&MatchingRule::Decimal)));
expect!(MatchingRule::Decimal).to(be_equal_to(MatchingRule::Decimal));
expect!(h(&MatchingRule::Null)).to(be_equal_to(h(&MatchingRule::Null)));
expect!(MatchingRule::Null).to(be_equal_to(MatchingRule::Null));
let regex1 = MatchingRule::Regex("\\d+".into());
let regex2 = MatchingRule::Regex("\\w+".into());
expect!(h(®ex1)).to(be_equal_to(h(®ex1)));
expect!(®ex1).to(be_equal_to(®ex1));
expect!(h(®ex1)).to_not(be_equal_to(h(®ex2)));
expect!(®ex1).to_not(be_equal_to(®ex2));
let min1 = MatchingRule::MinType(100);
let min2 = MatchingRule::MinType(200);
expect!(h(&min1)).to(be_equal_to(h(&min1)));
expect!(&min1).to(be_equal_to(&min1));
expect!(h(&min1)).to_not(be_equal_to(h(&min2)));
expect!(&min1).to_not(be_equal_to(&min2));
let max1 = MatchingRule::MaxType(100);
let max2 = MatchingRule::MaxType(200);
expect!(h(&max1)).to(be_equal_to(h(&max1)));
expect!(&max1).to(be_equal_to(&max1));
expect!(h(&max1)).to_not(be_equal_to(h(&max2)));
expect!(&max1).to_not(be_equal_to(&max2));
let minmax1 = MatchingRule::MinMaxType(100, 200);
let minmax2 = MatchingRule::MinMaxType(200, 200);
expect!(h(&minmax1)).to(be_equal_to(h(&minmax1)));
expect!(&minmax1).to(be_equal_to(&minmax1));
expect!(h(&minmax1)).to_not(be_equal_to(h(&minmax2)));
expect!(&minmax1).to_not(be_equal_to(&minmax2));
let datetime1 = MatchingRule::Timestamp("yyyy-MM-dd HH:mm:ss".into());
let datetime2 = MatchingRule::Timestamp("yyyy-MM-ddTHH:mm:ss".into());
expect!(h(&datetime1)).to(be_equal_to(h(&datetime1)));
expect!(&datetime1).to(be_equal_to(&datetime1));
expect!(h(&datetime1)).to_not(be_equal_to(h(&datetime2)));
expect!(&datetime1).to_not(be_equal_to(&datetime2));
let date1 = MatchingRule::Date("yyyy-MM-dd".into());
let date2 = MatchingRule::Date("yy-MM-dd".into());
expect!(h(&date1)).to(be_equal_to(h(&date1)));
expect!(&date1).to(be_equal_to(&date1));
expect!(h(&date1)).to_not(be_equal_to(h(&date2)));
expect!(&date1).to_not(be_equal_to(&date2));
let time1 = MatchingRule::Time("HH:mm:ss".into());
let time2 = MatchingRule::Time("hh:mm:ss".into());
expect!(h(&time1)).to(be_equal_to(h(&time1)));
expect!(&time1).to(be_equal_to(&time1));
expect!(h(&time1)).to_not(be_equal_to(h(&time2)));
expect!(&time1).to_not(be_equal_to(&time2));
let inc1 = MatchingRule::Include("string one".into());
let inc2 = MatchingRule::Include("string two".into());
expect!(h(&inc1)).to(be_equal_to(h(&inc1)));
expect!(&inc1).to(be_equal_to(&inc1));
expect!(h(&inc1)).to_not(be_equal_to(h(&inc2)));
expect!(&inc1).to_not(be_equal_to(&inc2));
let content1 = MatchingRule::ContentType("one".into());
let content2 = MatchingRule::ContentType("two".into());
expect!(h(&content1)).to(be_equal_to(h(&content1)));
expect!(&content1).to(be_equal_to(&content1));
expect!(h(&content1)).to_not(be_equal_to(h(&content2)));
expect!(&content1).to_not(be_equal_to(&content2));
let ac1 = MatchingRule::ArrayContains(vec![]);
let ac2 = MatchingRule::ArrayContains(vec![(0, MatchingRuleCategory::empty("body"), hashmap!{})]);
let ac3 = MatchingRule::ArrayContains(vec![(1, MatchingRuleCategory::empty("body"), hashmap!{})]);
let ac4 = MatchingRule::ArrayContains(vec![(0, MatchingRuleCategory::equality("body"), hashmap!{})]);
let ac5 = MatchingRule::ArrayContains(vec![(0, MatchingRuleCategory::empty("body"), hashmap!{ DocPath::new_unwrap("A") => Generator::RandomBoolean })]);
let ac6 = MatchingRule::ArrayContains(vec![
(0, MatchingRuleCategory::empty("body"), hashmap!{ DocPath::new_unwrap("A") => Generator::RandomBoolean }),
(1, MatchingRuleCategory::empty("body"), hashmap!{ DocPath::new_unwrap("A") => Generator::RandomDecimal(10) })
]);
let ac7 = MatchingRule::ArrayContains(vec![
(0, MatchingRuleCategory::empty("body"), hashmap!{ DocPath::new_unwrap("A") => Generator::RandomBoolean }),
(1, MatchingRuleCategory::equality("body"), hashmap!{ DocPath::new_unwrap("A") => Generator::RandomDecimal(10) })
]);
expect!(h(&ac1)).to(be_equal_to(h(&ac1)));
expect!(h(&ac1)).to_not(be_equal_to(h(&ac2)));
expect!(h(&ac1)).to_not(be_equal_to(h(&ac3)));
expect!(h(&ac1)).to_not(be_equal_to(h(&ac4)));
expect!(h(&ac1)).to_not(be_equal_to(h(&ac5)));
expect!(h(&ac1)).to_not(be_equal_to(h(&ac6)));
expect!(h(&ac1)).to_not(be_equal_to(h(&ac7)));
expect!(h(&ac2)).to(be_equal_to(h(&ac2)));
expect!(h(&ac2)).to_not(be_equal_to(h(&ac1)));
expect!(h(&ac2)).to_not(be_equal_to(h(&ac3)));
expect!(h(&ac2)).to_not(be_equal_to(h(&ac4)));
expect!(h(&ac2)).to_not(be_equal_to(h(&ac5)));
expect!(h(&ac2)).to_not(be_equal_to(h(&ac6)));
expect!(h(&ac2)).to_not(be_equal_to(h(&ac7)));
expect!(h(&ac3)).to(be_equal_to(h(&ac3)));
expect!(h(&ac3)).to_not(be_equal_to(h(&ac2)));
expect!(h(&ac3)).to_not(be_equal_to(h(&ac1)));
expect!(h(&ac3)).to_not(be_equal_to(h(&ac4)));
expect!(h(&ac3)).to_not(be_equal_to(h(&ac5)));
expect!(h(&ac3)).to_not(be_equal_to(h(&ac6)));
expect!(h(&ac3)).to_not(be_equal_to(h(&ac7)));
expect!(h(&ac4)).to(be_equal_to(h(&ac4)));
expect!(h(&ac4)).to_not(be_equal_to(h(&ac2)));
expect!(h(&ac4)).to_not(be_equal_to(h(&ac3)));
expect!(h(&ac4)).to_not(be_equal_to(h(&ac1)));
expect!(h(&ac4)).to_not(be_equal_to(h(&ac5)));
expect!(h(&ac4)).to_not(be_equal_to(h(&ac6)));
expect!(h(&ac4)).to_not(be_equal_to(h(&ac7)));
expect!(h(&ac5)).to(be_equal_to(h(&ac5)));
expect!(h(&ac5)).to_not(be_equal_to(h(&ac2)));
expect!(h(&ac5)).to_not(be_equal_to(h(&ac3)));
expect!(h(&ac5)).to_not(be_equal_to(h(&ac4)));
expect!(h(&ac5)).to_not(be_equal_to(h(&ac1)));
expect!(h(&ac5)).to_not(be_equal_to(h(&ac6)));
expect!(h(&ac5)).to_not(be_equal_to(h(&ac7)));
expect!(h(&ac6)).to(be_equal_to(h(&ac6)));
expect!(h(&ac6)).to_not(be_equal_to(h(&ac2)));
expect!(h(&ac6)).to_not(be_equal_to(h(&ac3)));
expect!(h(&ac6)).to_not(be_equal_to(h(&ac4)));
expect!(h(&ac6)).to_not(be_equal_to(h(&ac5)));
expect!(h(&ac6)).to_not(be_equal_to(h(&ac1)));
expect!(h(&ac6)).to_not(be_equal_to(h(&ac7)));
expect!(h(&ac7)).to(be_equal_to(h(&ac7)));
expect!(h(&ac7)).to_not(be_equal_to(h(&ac2)));
expect!(h(&ac7)).to_not(be_equal_to(h(&ac3)));
expect!(h(&ac7)).to_not(be_equal_to(h(&ac4)));
expect!(h(&ac7)).to_not(be_equal_to(h(&ac5)));
expect!(h(&ac7)).to_not(be_equal_to(h(&ac6)));
expect!(h(&ac7)).to_not(be_equal_to(h(&ac1)));
expect!(&ac1).to(be_equal_to(&ac1));
expect!(&ac1).to_not(be_equal_to(&ac2));
expect!(&ac1).to_not(be_equal_to(&ac3));
expect!(&ac1).to_not(be_equal_to(&ac4));
expect!(&ac1).to_not(be_equal_to(&ac5));
expect!(&ac1).to_not(be_equal_to(&ac6));
expect!(&ac1).to_not(be_equal_to(&ac7));
expect!(&ac2).to(be_equal_to(&ac2));
expect!(&ac2).to_not(be_equal_to(&ac1));
expect!(&ac2).to_not(be_equal_to(&ac3));
expect!(&ac2).to_not(be_equal_to(&ac4));
expect!(&ac2).to_not(be_equal_to(&ac5));
expect!(&ac2).to_not(be_equal_to(&ac6));
expect!(&ac2).to_not(be_equal_to(&ac7));
expect!(&ac3).to(be_equal_to(&ac3));
expect!(&ac3).to_not(be_equal_to(&ac2));
expect!(&ac3).to_not(be_equal_to(&ac1));
expect!(&ac3).to_not(be_equal_to(&ac4));
expect!(&ac3).to_not(be_equal_to(&ac5));
expect!(&ac3).to_not(be_equal_to(&ac6));
expect!(&ac3).to_not(be_equal_to(&ac7));
expect!(&ac4).to(be_equal_to(&ac4));
expect!(&ac4).to_not(be_equal_to(&ac2));
expect!(&ac4).to_not(be_equal_to(&ac3));
expect!(&ac4).to_not(be_equal_to(&ac1));
expect!(&ac4).to_not(be_equal_to(&ac5));
expect!(&ac4).to_not(be_equal_to(&ac6));
expect!(&ac4).to_not(be_equal_to(&ac7));
expect!(&ac5).to(be_equal_to(&ac5));
expect!(&ac5).to_not(be_equal_to(&ac2));
expect!(&ac5).to_not(be_equal_to(&ac3));
expect!(&ac5).to_not(be_equal_to(&ac4));
expect!(&ac5).to_not(be_equal_to(&ac1));
expect!(&ac5).to_not(be_equal_to(&ac6));
expect!(&ac5).to_not(be_equal_to(&ac7));
expect!(&ac6).to(be_equal_to(&ac6));
expect!(&ac6).to_not(be_equal_to(&ac2));
expect!(&ac6).to_not(be_equal_to(&ac3));
expect!(&ac6).to_not(be_equal_to(&ac4));
expect!(&ac6).to_not(be_equal_to(&ac5));
expect!(&ac6).to_not(be_equal_to(&ac1));
expect!(&ac6).to_not(be_equal_to(&ac7));
expect!(&ac7).to(be_equal_to(&ac7));
expect!(&ac7).to_not(be_equal_to(&ac2));
expect!(&ac7).to_not(be_equal_to(&ac3));
expect!(&ac7).to_not(be_equal_to(&ac4));
expect!(&ac7).to_not(be_equal_to(&ac5));
expect!(&ac7).to_not(be_equal_to(&ac6));
expect!(&ac7).to_not(be_equal_to(&ac1));
}
/// Enumeration to define how to combine rules
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Copy, Eq, Hash, PartialOrd, Ord)]
pub enum RuleLogic {
/// All rules must match
And,
/// At least one rule must match
Or
}
impl RuleLogic {
fn to_json(&self) -> Value {
Value::String(match self {
RuleLogic::And => "AND",
RuleLogic::Or => "OR"
}.into())
}
}
/// Data structure for representing a list of rules and the logic needed to combine them
#[derive(Serialize, Deserialize, Debug, Clone, Eq)]
pub struct RuleList {
/// List of rules to apply
pub rules: Vec<MatchingRule>,
/// Rule logic to use to evaluate multiple rules
pub rule_logic: RuleLogic,
/// If this rule list has matched the exact path or if it has cascaded (i.e. is a parent)
pub cascaded: bool
}
impl RuleList {
/// Creates a new empty rule list
pub fn empty(rule_logic: RuleLogic) -> RuleList {
RuleList {
rules: Vec::new(),
rule_logic,
cascaded: false
}
}
/// Creates a default rule list with an equality matcher
pub fn equality() -> RuleList {
RuleList {
rules: vec![ MatchingRule::Equality ],
rule_logic: RuleLogic::And,
cascaded: false
}
}
/// Creates a new rule list with the single matching rule
pub fn new(rule: MatchingRule) -> RuleList {
RuleList {
rules: vec![ rule ],
rule_logic: RuleLogic::And,
cascaded: false
}
}
/// If the rule list is empty (has no matchers)
pub fn is_empty(&self) -> bool {
self.rules.is_empty()
}
fn to_v3_json(&self) -> Value {
json!({
"combine": self.rule_logic.to_json(),
"matchers": Value::Array(self.rules.iter().map(|matcher| matcher.to_json()).collect())
})
}
fn to_v2_json(&self) -> Value {
match self.rules.get(0) {
Some(rule) => rule.to_json(),
None => json!({})
}
}
/// If there is a type matcher defined for the rule list
pub fn type_matcher_defined(&self) -> bool {
    self.rules.iter().any(|rule| matches!(rule,
      MatchingRule::Type |
      MatchingRule::MinType(_) |
      MatchingRule::MaxType(_) |
      MatchingRule::MinMaxType(_, _)))
}
/// If the values matcher is defined for the rule list
pub fn values_matcher_defined(&self) -> bool {
    self.rules.iter().any(|rule| matches!(rule, MatchingRule::Values))
}
/// Add a matching rule to the rule list
pub fn add_rule(&mut self, rule: &MatchingRule) {
self.rules.push(rule.clone())
}
  /// Clones this rule list with the cascaded flag set to the given value (cascaded means the
  /// rule was defined on a parent of the matched path)
pub fn as_cascaded(&self, b: bool) -> RuleList {
RuleList {
cascaded: b,
.. self.clone()
}
}
}
impl Hash for RuleList {
fn hash<H: Hasher>(&self, state: &mut H) {
self.rule_logic.hash(state);
for rule in &self.rules {
rule.hash(state);
}
}
}
impl PartialEq for RuleList {
fn eq(&self, other: &Self) -> bool {
self.rule_logic == other.rule_logic &&
self.rules == other.rules
}
}
impl Default for RuleList {
fn default() -> Self {
RuleList::empty(RuleLogic::And)
}
}
/// Category that the matching rule is applied to
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Eq, Hash, PartialOrd, Ord)]
pub enum Category {
/// Request Method
METHOD,
/// Request Path
PATH,
/// Request/Response Header
HEADER,
/// Request Query Parameter
QUERY,
/// Body
BODY,
/// Response Status
STATUS,
/// Message contents (body)
CONTENTS,
/// Message metadata
METADATA
}
impl FromStr for Category {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.to_lowercase().as_str() {
"method" => Ok(Category::METHOD),
"path" => Ok(Category::PATH),
"header" => Ok(Category::HEADER),
"query" => Ok(Category::QUERY),
"body" => Ok(Category::BODY),
"status" => Ok(Category::STATUS),
"contents" => Ok(Category::CONTENTS),
"metadata" => Ok(Category::METADATA),
_ => Err(format!("'{}' is not a valid Category", s))
}
}
}
impl <'a> Into<&'a str> for Category {
fn into(self) -> &'a str {
match self {
Category::METHOD => "method",
Category::PATH => "path",
Category::HEADER => "header",
Category::QUERY => "query",
Category::BODY => "body",
Category::STATUS => "status",
Category::CONTENTS => "contents",
Category::METADATA => "metadata"
}
}
}
impl Into<String> for Category {
fn into(self) -> String {
self.to_string()
}
}
impl <'a> From<&'a str> for Category {
fn from(s: &'a str) -> Self {
Category::from_str(s).unwrap_or_default()
}
}
impl From<String> for Category {
fn from(s: String) -> Self {
Category::from_str(&s).unwrap_or_default()
}
}
impl Default for Category {
fn default() -> Self {
Category::BODY
}
}
impl Display for Category {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
let s: &str = self.clone().into();
write!(f, "{}", s)
}
}
/// Data structure for representing a category of matching rules
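///
/// Example usage (a sketch, using the `matchingrules_list!` macro defined later in this module):
/// ```ignore
/// let category = matchingrules_list! {
///   "body"; "$.id" => [ MatchingRule::Integer ]
/// };
/// ```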
#[derive(Serialize, Deserialize, Debug, Clone, Eq, Default)]
pub struct MatchingRuleCategory {
/// Name of the category
pub name: Category,
/// Matching rules for this category
pub rules: HashMap<DocPath, RuleList>
}
impl MatchingRuleCategory {
/// Creates an empty category
pub fn empty<S>(name: S) -> MatchingRuleCategory
where S: Into<Category>
{
MatchingRuleCategory {
name: name.into(),
rules: hashmap! {},
}
}
/// Creates a default category
pub fn equality<S>(name: S) -> MatchingRuleCategory
where S: Into<Category>
{
MatchingRuleCategory {
name: name.into(),
rules: hashmap! {
DocPath::empty() => RuleList::equality()
}
}
}
/// If the matching rules in the category are empty
pub fn is_empty(&self) -> bool {
self.rules.is_empty()
}
/// If the matching rules in the category are not empty
pub fn is_not_empty(&self) -> bool {
!self.rules.is_empty()
}
/// Adds a rule from the Value representation
pub fn rule_from_json(
&mut self,
key: DocPath,
matcher_json: &Value,
rule_logic: RuleLogic,
) -> anyhow::Result<()> {
let matching_rule = MatchingRule::from_json(matcher_json)
.with_context(|| format!("Could not parse matcher JSON {:?}", matcher_json))?;
let rules = self.rules.entry(key)
.or_insert_with(|| RuleList::empty(rule_logic));
rules.rules.push(matching_rule);
Ok(())
}
/// Adds a rule to this category
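  ///
  /// Example usage (a sketch):
  /// ```ignore
  /// let mut category = MatchingRuleCategory::empty("body");
  /// category.add_rule(DocPath::new_unwrap("$.id"), MatchingRule::Integer, RuleLogic::And);
  /// ```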
pub fn add_rule(
&mut self,
key: DocPath,
matcher: MatchingRule,
rule_logic: RuleLogic,
) {
let rules = self.rules.entry(key).or_insert_with(|| RuleList::empty(rule_logic));
rules.rules.push(matcher);
}
/// Filters the matchers in the category by the predicate, and returns a new category
pub fn filter<F>(&self, predicate: F) -> MatchingRuleCategory
where F : Fn(&(&DocPath, &RuleList)) -> bool {
MatchingRuleCategory {
name: self.name.clone(),
rules: self.rules.iter().filter(predicate)
.map(|(path, rules)| (path.clone(), rules.clone())).collect()
}
}
fn max_by_path(&self, path: &[&str]) -> RuleList {
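    // Weight each rule by how well its path expression matches the given path:
    // `w` is the weight (zero when the expression does not match) and `t` is the
    // number of path tokens the expression matched. The best rule maximises
    // `w * t`, and is flagged as cascaded when it matched fewer tokens than the
    // full path (i.e. it was defined on a parent of the given path).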
self.rules.iter().map(|(k, v)| (k, v, k.path_weight(path)))
.filter(|&(_, _, (w, _))| w > 0)
.max_by_key(|&(_, _, (w, t))| w * t)
.map(|(_, v, (_, t))| v.as_cascaded(t != path.len()))
.unwrap_or_default()
}
/// Returns a JSON Value representation in V3 format
pub fn to_v3_json(&self) -> Value {
Value::Object(self.rules.iter().fold(serde_json::Map::new(), |mut map, (category, rulelist)| {
map.insert(String::from(category), rulelist.to_v3_json());
map
}))
}
/// Returns a JSON Value representation in V2 format
pub fn to_v2_json(&self) -> HashMap<String, Value> {
let mut map = hashmap!{};
match &self.name {
Category::PATH => for (_, v) in self.rules.clone() {
map.insert("$.path".to_string(), v.to_v2_json());
}
Category::BODY => for (k, v) in self.rules.clone() {
map.insert(String::from(k).replace("$", "$.body"), v.to_v2_json());
}
_ => for (k, v) in &self.rules {
map.insert(format!("$.{}.{}", self.name, k), v.to_v2_json());
}
};
map
}
/// If there is a type matcher defined for the category
pub fn type_matcher_defined(&self) -> bool {
self.rules.values().any(|rule_list| rule_list.type_matcher_defined())
}
/// If there is a values matcher defined in the rules
pub fn values_matcher_defined(&self) -> bool {
self.rules.values().any(|rule_list| rule_list.values_matcher_defined())
}
/// If there is a matcher defined for the path
pub fn matcher_is_defined(&self, path: &[&str]) -> bool {
let result = !self.resolve_matchers_for_path(path).is_empty();
trace!("matcher_is_defined: for category {} and path {:?} -> {}", self.name.to_string(), path, result);
result
}
  /// Filters this category to all rules that match the given path, for categories that contain
  /// collections (e.g. bodies, headers, query parameters). Returns a clone of this category otherwise.
pub fn resolve_matchers_for_path(&self, path: &[&str]) -> MatchingRuleCategory {
match self.name {
Category::HEADER| Category::QUERY | Category::BODY |
Category::CONTENTS | Category::METADATA => self.filter(|(val, _)| {
val.matches_path(path)
}),
_ => self.clone()
}
}
/// Selects the best matcher for the given path by calculating a weighting for each one
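  ///
  /// For example (a sketch):
  /// ```ignore
  /// let category = matchingrules_list! {
  ///   "body"; "$.animals" => [ MatchingRule::MinType(1) ]
  /// };
  /// // Body rules cascade down to child paths, so the rule is still selected
  /// // here, but flagged as cascaded:
  /// let rules = category.select_best_matcher(&["$", "animals", "0"]);
  /// assert!(rules.cascaded);
  /// ```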
pub fn select_best_matcher(&self, path: &[&str]) -> RuleList {
match self.name {
Category::BODY | Category::METADATA => self.max_by_path(path),
_ => self.resolve_matchers_for_path(path).as_rule_list()
}
}
  /// Returns this category as a matching rule list. Returns an empty rule list if there are no rules
pub fn as_rule_list(&self) -> RuleList {
self.rules.values().next().cloned().unwrap_or_default()
}
/// Adds the rules to the category from the provided JSON
pub fn add_rules_from_json(&mut self, rules: &Value) -> anyhow::Result<()> {
if self.name == Category::PATH && rules.get("matchers").is_some() {
let rule_logic = match rules.get("combine") {
Some(val) => if json_to_string(val).to_uppercase() == "OR" {
RuleLogic::Or
} else {
RuleLogic::And
},
None => RuleLogic::And
};
if let Some(matchers) = rules.get("matchers") {
if let Value::Array(array) = matchers {
for matcher in array {
            self.rule_from_json(DocPath::empty(), matcher, rule_logic)?;
}
}
}
} else if let Value::Object(m) = rules {
if m.contains_key("matchers") {
self.add_rule_list(DocPath::empty(), rules)?;
} else {
for (k, v) in m {
self.add_rule_list(DocPath::new(k)?, v)?;
}
}
}
Ok(())
}
fn add_rule_list(&mut self, k: DocPath, v: &Value) -> anyhow::Result<()> {
let rule_logic = match v.get("combine") {
Some(val) => if json_to_string(val).to_uppercase() == "OR" {
RuleLogic::Or
} else {
RuleLogic::And
},
None => RuleLogic::And
};
if let Some(&Value::Array(ref array)) = v.get("matchers") {
for matcher in array {
        self.rule_from_json(k.clone(), matcher, rule_logic)?;
}
}
Ok(())
}
/// Returns any generators associated with these matching rules
pub fn generators(&self) -> HashMap<DocPath, Generator> {
let mut generators = hashmap!{};
for (base_path, rules) in &self.rules {
for rule in &rules.rules {
if rule.has_generators() {
for generator in rule.generators() {
generators.insert(base_path.clone(), generator);
}
}
}
}
generators
}
/// Clones this category with the new name
pub fn rename<S>(&self, name: S) -> Self
where S: Into<Category> {
MatchingRuleCategory {
name: name.into(),
.. self.clone()
}
}
}
impl Hash for MatchingRuleCategory {
fn hash<H: Hasher>(&self, state: &mut H) {
self.name.hash(state);
    // Sort the entries so the hash does not depend on HashMap iteration order
    let mut entries: Vec<_> = self.rules.iter().collect();
    entries.sort_by_key(|(k, _)| String::from(*k));
    for (k, v) in entries {
      k.hash(state);
      v.hash(state);
    }
}
}
impl PartialEq for MatchingRuleCategory {
fn eq(&self, other: &Self) -> bool {
self.name == other.name && self.rules == other.rules
}
}
impl PartialOrd for MatchingRuleCategory {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.name.partial_cmp(&other.name)
}
}
impl Ord for MatchingRuleCategory {
fn cmp(&self, other: &Self) -> Ordering {
self.name.cmp(&other.name)
}
}
/// Data structure for representing a collection of matchers
#[derive(Serialize, Deserialize, Debug, Clone, Eq)]
#[serde(transparent)]
pub struct MatchingRules {
/// Categories of matching rules
pub rules: HashMap<Category, MatchingRuleCategory>
}
impl MatchingRules {
/// If the matching rules are empty (that is there are no rules assigned to any categories)
pub fn is_empty(&self) -> bool {
self.rules.values().all(|category| category.is_empty())
}
/// If the matching rules are not empty (that is there is at least one rule assigned to a category)
pub fn is_not_empty(&self) -> bool {
self.rules.values().any(|category| category.is_not_empty())
}
/// Adds the category to the map of rules
pub fn add_category<S>(&mut self, category: S) -> &mut MatchingRuleCategory
where S: Into<Category> + Clone
{
    let category = category.into();
    self.rules.entry(category.clone())
      .or_insert_with(|| MatchingRuleCategory::empty(category))
}
/// Returns all the category names in this rule set
pub fn categories(&self) -> HashSet<Category> {
self.rules.keys().cloned().collect()
}
/// Returns the category of rules for a given category name
pub fn rules_for_category<S>(&self, category: S) -> Option<MatchingRuleCategory>
where S: Into<Category> {
self.rules.get(&category.into()).cloned()
}
/// If there is a matcher defined for the category and path
pub fn matcher_is_defined<S>(&self, category: S, path: &Vec<&str>) -> bool
where S: Into<Category> + Clone {
let result = match self.resolve_matchers(category.clone().into(), path) {
Some(ref rules) => !rules.is_empty(),
None => false
};
trace!("matcher_is_defined for category {} and path {:?} -> {}", category.into(), path, result);
result
}
/// If there is a wildcard matcher defined for the category and path
pub fn wildcard_matcher_is_defined<S>(&self, category: S, path: &Vec<&str>) -> bool
where S: Into<Category> + Clone {
match self.resolve_wildcard_matchers(category, path) {
Some(ref rules) => !rules.filter(|&(val, _)| val.is_wildcard()).is_empty(),
None => false
}
}
/// If there is a type matcher defined for the category and path
pub fn type_matcher_defined<S>(&self, category: S, path: &Vec<&str>) -> bool
where S: Into<Category> + Display + Clone {
let result = match self.resolve_matchers(category.clone(), path) {
Some(ref rules) => rules.type_matcher_defined(),
None => false
};
trace!("type_matcher_defined for category {} and path {:?} -> {}", category.into(), path, result);
result
}
  /// Returns the matching rule category filtered to the rules that match the given path, if the
  /// category exists.
pub fn resolve_matchers<S>(&self, category: S, path: &Vec<&str>) -> Option<MatchingRuleCategory>
where S: Into<Category> {
self.rules_for_category(category)
.map(|rules| rules.resolve_matchers_for_path(path))
}
/// Returns a list of rules from the body category that match the given path
pub fn resolve_body_matchers_by_path(&self, path: &Vec<&str>) -> RuleList {
match self.rules_for_category("body") {
Some(category) => category.max_by_path(path),
None => RuleList::default()
}
}
fn resolve_wildcard_matchers<S>(&self, category: S, path: &Vec<&str>) -> Option<MatchingRuleCategory>
where S: Into<Category> + Clone {
let category = category.into();
match category {
Category::BODY => self.rules_for_category(Category::BODY).map(|category| category.filter(|&(val, _)| {
val.matches_path_exactly(path)
})),
Category::HEADER | Category::QUERY => self.rules_for_category(category.clone()).map(|category| category.filter(|&(val, _)| {
path.len() == 1 && Some(path[0]) == val.first_field()
})),
_ => self.rules_for_category(category)
}
}
fn load_from_v2_map(&mut self, map: &serde_json::Map<String, Value>
) -> anyhow::Result<()> {
for (key, v) in map {
let path = key.split('.').collect::<Vec<&str>>();
if key.starts_with("$.body") {
if key == "$.body" {
self.add_v2_rule("body", DocPath::root(), v)?;
} else {
self.add_v2_rule("body", DocPath::new(format!("${}", &key[6..]))?, v)?;
}
} else if key.starts_with("$.headers") {
self.add_v2_rule("header", DocPath::new(path[2])?, v)?;
} else {
self.add_v2_rule(
path[1],
if path.len() > 2 { DocPath::new(path[2])? } else { DocPath::empty() },
v,
)?;
}
}
Ok(())
}
fn load_from_v3_map(&mut self, map: &serde_json::Map<String, Value>
) -> anyhow::Result<()> {
for (k, v) in map {
self.add_rules(k, v)?;
}
Ok(())
}
fn add_rules<S: Into<String>>(&mut self, category_name: S, rules: &Value
) -> anyhow::Result<()> {
let category = self.add_category(category_name.into());
category.add_rules_from_json(rules)
}
fn add_v2_rule<S: Into<String>>(
&mut self,
category_name: S,
sub_category: DocPath,
rule: &Value,
) -> anyhow::Result<()> {
let category = self.add_category(category_name.into());
category.rule_from_json(sub_category, rule, RuleLogic::And)
}
fn to_v3_json(&self) -> Value {
Value::Object(self.rules.iter().fold(serde_json::Map::new(), |mut map, (name, sub_category)| {
match name {
Category::PATH => if let Some(rules) = sub_category.rules.get(&DocPath::empty()) {
map.insert(name.to_string(), rules.to_v3_json());
}
_ => {
map.insert(name.to_string(), sub_category.to_v3_json());
}
}
map
}))
}
fn to_v2_json(&self) -> Value {
Value::Object(self.rules.iter().fold(serde_json::Map::new(), |mut map, (_, category)| {
for (key, value) in category.to_v2_json() {
map.insert(key.clone(), value);
}
map
}))
}
/// Clones the matching rules, renaming the category
pub fn rename<S>(&self, old_name: S, new_name: S) -> Self
where S: Into<Category> {
let old = old_name.into();
let new = new_name.into();
MatchingRules {
rules: self.rules.iter().map(|(key, value)| {
if key == &old {
(new.clone(), value.rename(new.clone()))
} else {
(key.clone(), value.clone())
}
}).collect()
}
}
}
impl Hash for MatchingRules {
fn hash<H: Hasher>(&self, state: &mut H) {
    // Sort the entries so the hash does not depend on HashMap iteration order
    let mut entries: Vec<_> = self.rules.iter().collect();
    entries.sort_by_key(|(k, _)| (*k).clone());
    for (k, v) in entries {
      k.hash(state);
      v.hash(state);
    }
}
}
impl PartialEq for MatchingRules {
fn eq(&self, other: &Self) -> bool {
self.rules == other.rules
}
}
impl Default for MatchingRules {
fn default() -> Self {
MatchingRules {
rules: hashmap!{}
}
}
}
/// Parses the matching rules from the Value structure
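///
/// Example usage (a sketch; `deprecated_name` optionally names a legacy key to fall
/// back to when `"matchingRules"` is not present):
/// ```ignore
/// let rules = matchers_from_json(&pact_json, &None)?;
/// ```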
pub fn matchers_from_json(value: &Value, deprecated_name: &Option<String>
) -> anyhow::Result<MatchingRules> {
let matchers_json = match (value.get("matchingRules"), deprecated_name.clone().and_then(|name| value.get(&name))) {
(Some(v), _) => Some(v),
(None, Some(v)) => Some(v),
(None, None) => None
};
let mut matching_rules = MatchingRules::default();
  if let Some(Value::Object(m)) = matchers_json {
    if m.keys().next().unwrap_or(&String::default()).starts_with('$') {
      matching_rules.load_from_v2_map(m)?
    } else {
      matching_rules.load_from_v3_map(m)?
    }
  }
Ok(matching_rules)
}
/// Generates a Value structure for the provided matching rules
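///
/// Example usage (a sketch): V3/V4 produce the nested per-category format,
/// earlier spec versions the flat V2 format:
/// ```ignore
/// let json = matchers_to_json(&rules, &PactSpecification::V3);
/// ```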
pub fn matchers_to_json(matchers: &MatchingRules, spec_version: &PactSpecification) -> Value {
match spec_version {
&PactSpecification::V3 | &PactSpecification::V4 => matchers.to_v3_json(),
_ => matchers.to_v2_json()
}
}
/// Macro to ease constructing matching rules
/// Example usage:
/// ```ignore
/// matchingrules! {
/// "query" => { "user_id" => [ MatchingRule::Regex(s!("^[0-9]+$")) ] }
/// }
/// ```
#[macro_export]
macro_rules! matchingrules {
( $( $name:expr => {
$( $subname:expr => [ $( $matcher:expr ), * ] ),*
}), * ) => {{
let mut _rules = $crate::matchingrules::MatchingRules::default();
$({
let mut _category = _rules.add_category($name);
$({
$({
_category.add_rule(
$crate::path_exp::DocPath::new_unwrap($subname),
$matcher,
$crate::matchingrules::RuleLogic::And,
);
})*
})*
})*
_rules
}};
}
/// Macro to ease constructing matching rules
/// Example usage:
/// ```ignore
/// matchingrules_list! {
/// "body"; "user_id" => [ MatchingRule::Regex(s!("^[0-9]+$")) ]
/// }
/// ```
#[macro_export]
macro_rules! matchingrules_list {
( $name:expr ; $( $subname:expr => [ $( $matcher:expr ), * ] ),* ) => {{
let mut _category = $crate::matchingrules::MatchingRuleCategory::empty($name);
$(
$(
_category.add_rule(
$crate::path_exp::DocPath::new_unwrap($subname),
$matcher,
$crate::matchingrules::RuleLogic::And,
);
)*
)*
_category
}};
( $name:expr ; [ $( $matcher:expr ), * ] ) => {{
let mut _category = $crate::matchingrules::MatchingRuleCategory::empty($name);
$(
_category.add_rule(
$crate::path_exp::DocPath::empty(),
$matcher,
$crate::matchingrules::RuleLogic::And,
);
)*
_category
}};
}
#[cfg(test)]
mod tests {
use expectest::prelude::*;
use maplit::hashset;
use serde_json::Value;
use speculate::speculate;
use crate::generators::*;
use super::*;
use super::super::*;
#[test]
fn rules_are_empty_when_there_are_no_categories() {
expect!(MatchingRules::default().is_empty()).to(be_true());
}
#[test]
fn rules_are_empty_when_there_are_only_empty_categories() {
expect!(MatchingRules {
rules: hashmap!{
"body".into() => MatchingRuleCategory::empty("body"),
"header".into() => MatchingRuleCategory::empty("header"),
"query".into() => MatchingRuleCategory::empty("query")
}
}.is_empty()).to(be_true());
}
#[test]
fn rules_are_not_empty_when_there_is_a_nonempty_category() {
expect!(MatchingRules {
rules: hashmap!{
"body".into() => MatchingRuleCategory::empty("body"),
"header".into() => MatchingRuleCategory::empty("headers"),
"query".into() => MatchingRuleCategory {
name: "query".into(),
rules: hashmap!{
DocPath::empty() => RuleList {
rules: vec![ MatchingRule::Equality ],
rule_logic: RuleLogic::And,
cascaded: false
}
}
},
}
}.is_empty()).to(be_false());
}
#[test]
fn matchers_from_json_test() {
let matching_rules = matchers_from_json(&Value::Null, &None);
let matching_rules = matching_rules.unwrap();
expect!(matching_rules.rules.iter()).to(be_empty());
}
#[test]
fn loads_v2_matching_rules() {
let matching_rules_json = Value::from_str(r#"{"matchingRules": {
"$.path": { "match": "regex", "regex": "\\w+" },
"$.query.Q1": { "match": "regex", "regex": "\\d+" },
"$.header.HEADERY": {"match": "include", "value": "ValueA"},
"$.body.animals": {"min": 1, "match": "type"},
"$.body.animals[*].*": {"match": "type"},
"$.body.animals[*].children": {"min": 1},
"$.body.animals[*].children[*].*": {"match": "type"}
}}"#).unwrap();
let matching_rules = matchers_from_json(&matching_rules_json, &None);
let matching_rules = matching_rules.unwrap();
expect!(matching_rules.rules.iter()).to_not(be_empty());
expect!(matching_rules.categories()).to(be_equal_to(hashset!{
Category::PATH, Category::QUERY, Category::HEADER, Category::BODY
}));
expect!(matching_rules.rules_for_category("path")).to(be_some().value(MatchingRuleCategory {
name: "path".into(),
rules: hashmap! { DocPath::empty() => RuleList { rules: vec![ MatchingRule::Regex("\\w+".to_string()) ], rule_logic: RuleLogic::And, cascaded: false } }
}));
expect!(matching_rules.rules_for_category("query")).to(be_some().value(MatchingRuleCategory {
name: "query".into(),
rules: hashmap!{ DocPath::new_unwrap("Q1") => RuleList { rules: vec![ MatchingRule::Regex("\\d+".to_string()) ], rule_logic: RuleLogic::And, cascaded: false } }
}));
expect!(matching_rules.rules_for_category("header")).to(be_some().value(MatchingRuleCategory {
name: "header".into(),
rules: hashmap!{ DocPath::new_unwrap("HEADERY") => RuleList { rules: vec![
MatchingRule::Include("ValueA".to_string()) ], rule_logic: RuleLogic::And, cascaded: false } }
}));
expect!(matching_rules.rules_for_category("body")).to(be_some().value(MatchingRuleCategory {
name: "body".into(),
rules: hashmap!{
DocPath::new_unwrap("$.animals") => RuleList { rules: vec![ MatchingRule::MinType(1) ], rule_logic: RuleLogic::And, cascaded: false },
DocPath::new_unwrap("$.animals[*].*") => RuleList { rules: vec![ MatchingRule::Type ], rule_logic: RuleLogic::And, cascaded: false },
DocPath::new_unwrap("$.animals[*].children") => RuleList { rules: vec![ MatchingRule::MinType(1) ], rule_logic: RuleLogic::And, cascaded: false },
DocPath::new_unwrap("$.animals[*].children[*].*") => RuleList { rules: vec![ MatchingRule::Type ], rule_logic: RuleLogic::And, cascaded: false }
}
}));
}
#[test]
fn loads_v3_matching_rules() {
let matching_rules_json = Value::from_str(r#"{"matchingRules": {
"path": {
"matchers": [
{ "match": "regex", "regex": "\\w+" }
]
},
"query": {
"Q1": {
"matchers": [
{ "match": "regex", "regex": "\\d+" }
]
}
},
"header": {
"HEADERY": {
"combine": "OR",
"matchers": [
{"match": "include", "value": "ValueA"},
{"match": "include", "value": "ValueB"}
]
}
},
"body": {
"$.animals": {
"matchers": [{"min": 1, "match": "type"}]
},
"$.animals[*].*": {
"matchers": [{"match": "type"}]
},
"$.animals[*].children": {
"matchers": [{"min": 1}]
},
"$.animals[*].children[*].*": {
"matchers": [{"match": "type"}]
}
}
}}"#).unwrap();
let matching_rules = matchers_from_json(&matching_rules_json, &None);
let matching_rules = matching_rules.unwrap();
expect!(matching_rules.rules.iter()).to_not(be_empty());
expect!(matching_rules.categories()).to(be_equal_to(hashset!{
Category::PATH, Category::QUERY, Category::HEADER, Category::BODY
}));
expect!(matching_rules.rules_for_category("path")).to(be_some().value(MatchingRuleCategory {
name: "path".into(),
rules: hashmap! { DocPath::empty() => RuleList { rules: vec![ MatchingRule::Regex("\\w+".to_string()) ], rule_logic: RuleLogic::And, cascaded: false } }
}));
expect!(matching_rules.rules_for_category("query")).to(be_some().value(MatchingRuleCategory {
name: "query".into(),
rules: hashmap!{ DocPath::new_unwrap("Q1") => RuleList { rules: vec![ MatchingRule::Regex("\\d+".to_string()) ], rule_logic: RuleLogic::And, cascaded: false } }
}));
expect!(matching_rules.rules_for_category("header")).to(be_some().value(MatchingRuleCategory {
name: "header".into(),
rules: hashmap!{ DocPath::new_unwrap("HEADERY") => RuleList { rules: vec![
MatchingRule::Include("ValueA".to_string()),
MatchingRule::Include("ValueB".to_string()) ], rule_logic: RuleLogic::Or, cascaded: false } }
}));
expect!(matching_rules.rules_for_category("body")).to(be_some().value(MatchingRuleCategory {
name: "body".into(),
rules: hashmap!{
DocPath::new_unwrap("$.animals") => RuleList { rules: vec![ MatchingRule::MinType(1) ], rule_logic: RuleLogic::And, cascaded: false },
DocPath::new_unwrap("$.animals[*].*") => RuleList { rules: vec![ MatchingRule::Type ], rule_logic: RuleLogic::And, cascaded: false },
DocPath::new_unwrap("$.animals[*].children") => RuleList { rules: vec![ MatchingRule::MinType(1) ], rule_logic: RuleLogic::And, cascaded: false },
DocPath::new_unwrap("$.animals[*].children[*].*") => RuleList { rules: vec![ MatchingRule::Type ], rule_logic: RuleLogic::And, cascaded: false }
}
}));
}
#[test]
fn correctly_loads_v3_matching_rules_with_incorrect_path_format() {
let matching_rules_json = Value::from_str(r#"{"matchingRules": {
"path": {
"": {
"matchers": [
{ "match": "regex", "regex": "\\w+" }
]
}
}
}}"#).unwrap();
let matching_rules = matchers_from_json(&matching_rules_json, &None);
let matching_rules = matching_rules.unwrap();
expect!(matching_rules.rules.iter()).to_not(be_empty());
expect!(matching_rules.categories()).to(be_equal_to(hashset!{ Category::PATH }));
expect!(matching_rules.rules_for_category("path")).to(be_some().value(MatchingRuleCategory {
name: "path".into(),
rules: hashmap! { DocPath::empty() => RuleList { rules: vec![ MatchingRule::Regex("\\w+".to_string()) ], rule_logic: RuleLogic::And, cascaded: false } }
}));
}
speculate! {
describe "generating matcher JSON" {
before {
let matchers = matchingrules!{
"body" => {
"$.a.b" => [ MatchingRule::Type ]
},
"path" => { "" => [ MatchingRule::Regex("/path/\\d+".to_string()) ] },
"query" => {
"a" => [ MatchingRule::Regex("\\w+".to_string()) ]
},
"header" => {
"item1" => [ MatchingRule::Regex("5".to_string()) ]
}
};
}
it "generates V2 matcher format" {
expect!(matchers.to_v2_json().to_string()).to(be_equal_to(
"{\"$.body.a.b\":{\"match\":\"type\"},\
\"$.header.item1\":{\"match\":\"regex\",\"regex\":\"5\"},\
\"$.path\":{\"match\":\"regex\",\"regex\":\"/path/\\\\d+\"},\
\"$.query.a\":{\"match\":\"regex\",\"regex\":\"\\\\w+\"}}"
));
}
it "generates V3 matcher format" {
expect!(matchers.to_v3_json().to_string()).to(be_equal_to(
"{\"body\":{\"$.a.b\":{\"combine\":\"AND\",\"matchers\":[{\"match\":\"type\"}]}},\
\"header\":{\"item1\":{\"combine\":\"AND\",\"matchers\":[{\"match\":\"regex\",\"regex\":\"5\"}]}},\
\"path\":{\"combine\":\"AND\",\"matchers\":[{\"match\":\"regex\",\"regex\":\"/path/\\\\d+\"}]},\
\"query\":{\"a\":{\"combine\":\"AND\",\"matchers\":[{\"match\":\"regex\",\"regex\":\"\\\\w+\"}]}}}"
));
}
}
}
#[test]
fn matching_rule_from_json_test() {
expect!(MatchingRule::from_json(&Value::from_str("\"test string\"").unwrap())).to(be_err());
expect!(MatchingRule::from_json(&Value::from_str("null").unwrap())).to(be_err());
expect!(MatchingRule::from_json(&Value::from_str("{}").unwrap())).to(be_err());
expect!(MatchingRule::from_json(&Value::from_str("[]").unwrap())).to(be_err());
expect!(MatchingRule::from_json(&Value::from_str("true").unwrap())).to(be_err());
expect!(MatchingRule::from_json(&Value::from_str("false").unwrap())).to(be_err());
expect!(MatchingRule::from_json(&Value::from_str("100").unwrap())).to(be_err());
expect!(MatchingRule::from_json(&Value::from_str("100.10").unwrap())).to(be_err());
expect!(MatchingRule::from_json(&Value::from_str("{\"stuff\": 100}").unwrap())).to(be_err());
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"stuff\"}").unwrap())).to(be_err());
expect!(MatchingRule::from_json(&Value::from_str("{\"regex\": \"[0-9]\"}").unwrap())).to(
be_ok().value(MatchingRule::Regex("[0-9]".to_string())));
expect!(MatchingRule::from_json(&Value::from_str("{\"min\": 100}").unwrap())).to(
be_ok().value(MatchingRule::MinType(100)));
expect!(MatchingRule::from_json(&Value::from_str("{\"max\": 100}").unwrap())).to(
be_ok().value(MatchingRule::MaxType(100)));
expect!(MatchingRule::from_json(&Value::from_str("{\"timestamp\": \"yyyy\"}").unwrap())).to(
be_ok().value(MatchingRule::Timestamp("yyyy".to_string())));
expect!(MatchingRule::from_json(&Value::from_str("{\"date\": \"yyyy\"}").unwrap())).to(
be_ok().value(MatchingRule::Date("yyyy".to_string())));
expect!(MatchingRule::from_json(&Value::from_str("{\"time\": \"hh:mm\"}").unwrap())).to(
be_ok().value(MatchingRule::Time("hh:mm".to_string())));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"regex\", \"regex\": \"[0-9]\"}").unwrap())).to(
be_ok().value(MatchingRule::Regex("[0-9]".to_string())));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"regex\"}").unwrap())).to(be_err());
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"equality\"}").unwrap())).to(
be_ok().value(MatchingRule::Equality));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"include\", \"value\": \"A\"}").unwrap())).to(
be_ok().value(MatchingRule::Include("A".to_string())));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"include\"}").unwrap())).to(be_err());
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"type\", \"min\": 1}").unwrap())).to(
be_ok().value(MatchingRule::MinType(1)));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"type\", \"max\": \"1\"}").unwrap())).to(
be_ok().value(MatchingRule::MaxType(1)));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"type\", \"min\": 1, \"max\": \"1\"}").unwrap())).to(
be_ok().value(MatchingRule::MinMaxType(1, 1)));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"type\"}").unwrap())).to(
be_ok().value(MatchingRule::Type));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"type\", \"value\": 100}").unwrap())).to(
be_ok().value(MatchingRule::Type));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"min\", \"min\": 1}").unwrap())).to(
be_ok().value(MatchingRule::MinType(1)));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"max\", \"max\": \"1\"}").unwrap())).to(
be_ok().value(MatchingRule::MaxType(1)));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"min\"}").unwrap())).to(be_err());
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"max\"}").unwrap())).to(be_err());
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"number\"}").unwrap())).to(
be_ok().value(MatchingRule::Number));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"integer\"}").unwrap())).to(
be_ok().value(MatchingRule::Integer));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"decimal\"}").unwrap())).to(
be_ok().value(MatchingRule::Decimal));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"real\"}").unwrap())).to(
be_ok().value(MatchingRule::Decimal));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"boolean\"}").unwrap())).to(
be_ok().value(MatchingRule::Boolean));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"timestamp\", \"timestamp\": \"A\"}").unwrap())).to(
be_ok().value(MatchingRule::Timestamp("A".to_string())));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"timestamp\"}").unwrap())).to(be_err());
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"time\", \"time\": \"A\"}").unwrap())).to(
be_ok().value(MatchingRule::Time("A".to_string())));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"time\"}").unwrap())).to(be_err());
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"date\", \"date\": \"A\"}").unwrap())).to(
be_ok().value(MatchingRule::Date("A".to_string())));
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"date\"}").unwrap())).to(be_err());
expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"null\"}").unwrap())).to(
be_ok().value(MatchingRule::Null));
let json = json!({
"match": "arrayContains",
"variants": []
});
expect!(MatchingRule::from_json(&json)).to(be_ok().value(MatchingRule::ArrayContains(vec![])));
let json = json!({
"match": "arrayContains",
"variants": [
{
"index": 0,
"rules": {
"matchers": [ { "match": "equality" } ]
}
}
]
});
expect!(MatchingRule::from_json(&json)).to(be_ok().value(
MatchingRule::ArrayContains(
vec![
(0, matchingrules_list! { "body"; [ MatchingRule::Equality ] }, HashMap::default())
])
));
let json = json!({
"match": "arrayContains",
"variants": [
{
"index": 0,
"rules": {
"matchers": [ { "match": "equality" } ]
},
"generators": {
"a": { "type": "Uuid" }
}
}
]
});
let generators = hashmap!{ DocPath::new_unwrap("a") => Generator::Uuid(None) };
expect!(MatchingRule::from_json(&json)).to(be_ok().value(
MatchingRule::ArrayContains(
vec![
(0, matchingrules_list! { "body"; [ MatchingRule::Equality ] }, generators)
])
));
let json = json!({
"match": "statusCode",
"status": "success"
});
expect!(MatchingRule::from_json(&json)).to(be_ok().value(
MatchingRule::StatusCode(HttpStatus::Success)
));
let json = json!({
"match": "statusCode",
"status": [200, 201, 204]
});
expect!(MatchingRule::from_json(&json)).to(be_ok().value(
MatchingRule::StatusCode(HttpStatus::StatusCodes(vec![200, 201, 204]))
));
}
#[test]
fn matching_rule_to_json_test() {
expect!(MatchingRule::StatusCode(HttpStatus::ClientError).to_json()).to(
be_equal_to(json!({
"match": "statusCode",
"status": "clientError"
})));
expect!(MatchingRule::StatusCode(HttpStatus::StatusCodes(vec![400, 401, 404])).to_json()).to(
be_equal_to(json!({
"match": "statusCode",
"status": [400, 401, 404]
})));
}
#[test]
fn matcher_is_defined_returns_false_when_there_are_no_matchers() {
let matchers = matchingrules!{};
expect!(matchers.matcher_is_defined("body", &vec!["$", "a", "b"])).to(be_false());
}
#[test]
fn matcher_is_defined_returns_false_when_the_path_does_not_have_a_matcher_entry() {
let matchers = matchingrules!{
"body" => { }
};
expect!(matchers.matcher_is_defined("body", &vec!["$", "a", "b"])).to(be_false());
}
#[test]
fn matcher_is_defined_returns_true_when_the_path_does_have_a_matcher_entry() {
let matchers = matchingrules! {
"body" => {
"$.a.b" => [ MatchingRule::Type ]
}
};
expect!(matchers.matcher_is_defined("body", &vec!["$", "a", "b"])).to(be_true());
}
#[test]
fn matcher_is_defined_returns_false_when_the_path_is_empty() {
let matchers = matchingrules! {
"body" => {
"$.a.b" => [ MatchingRule::Type ]
}
};
expect!(matchers.matcher_is_defined("body", &vec![])).to(be_false());
}
#[test]
fn matcher_is_defined_returns_true_when_the_parent_of_the_path_does_have_a_matcher_entry() {
let matchers = matchingrules!{
"body" => {
"$.a.b" => [ MatchingRule::Type ]
}
};
expect!(matchers.matcher_is_defined("body", &vec!["$", "a", "b", "c"])).to(be_true());
}
#[test]
fn wildcard_matcher_is_defined_returns_false_when_there_are_no_matchers() {
let matchers = matchingrules!{};
expect!(matchers.wildcard_matcher_is_defined("body", &vec!["$", "a", "b"])).to(be_false());
}
#[test]
fn wildcard_matcher_is_defined_returns_false_when_the_path_does_not_have_a_matcher_entry() {
let matchers = matchingrules!{
"body" => { }
};
expect!(matchers.wildcard_matcher_is_defined("body", &vec!["$", "a", "b"])).to(be_false());
}
#[test]
fn wildcard_matcher_is_defined_returns_false_when_the_path_does_have_a_matcher_entry_and_it_is_not_a_wildcard() {
let matchers = matchingrules!{
"body" => {
"$.a.b" => [ MatchingRule::Type ],
"$.*" => [ MatchingRule::Type ]
}
};
expect!(matchers.wildcard_matcher_is_defined("body", &vec!["$", "a", "b"])).to(be_false());
}
#[test]
fn wildcard_matcher_is_defined_returns_true_when_the_path_does_have_a_matcher_entry_and_it_is_a_wildcard() {
let matchers = matchingrules!{
"body" => {
"$.a.*" => [ MatchingRule::Type ]
}
};
expect!(matchers.wildcard_matcher_is_defined("body", &vec!["$", "a", "b"])).to(be_true());
}
#[test]
fn wildcard_matcher_is_defined_returns_false_when_the_parent_of_the_path_does_have_a_matcher_entry() {
let matchers = matchingrules!{
"body" => {
"$.a.*" => [ MatchingRule::Type ]
}
};
expect!(matchers.wildcard_matcher_is_defined("body", &vec!["$", "a", "b", "c"])).to(be_false());
}
#[test]
fn min_and_max_values_get_serialised_to_json_as_numbers() {
expect!(MatchingRule::MinType(1).to_json().to_string()).to(be_equal_to("{\"match\":\"type\",\"min\":1}"));
expect!(MatchingRule::MaxType(1).to_json().to_string()).to(be_equal_to("{\"match\":\"type\",\"max\":1}"));
expect!(MatchingRule::MinMaxType(1, 10).to_json().to_string()).to(be_equal_to("{\"match\":\"type\",\"max\":10,\"min\":1}"));
}
}
| 36.884346 | 166 | 0.596897 |
1c60ad30951b1973da168365f7bcfee7f5a9e8f6 | 362 | #[cfg(feature = "file")]
use fluvio_future::zero_copy::SendFileError;
use std::io::Error as IoError;
#[derive(thiserror::Error, Debug)]
pub enum SocketError {
#[error(transparent)]
Io(#[from] IoError),
#[error("Socket closed")]
SocketClosed,
#[cfg(feature = "file")]
#[error("Zero-copy IO error")]
SendFile(#[from] SendFileError),
}
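// A hedged usage sketch: `#[from]` derives `From<IoError> for SocketError`,
// so the `?` operator converts I/O failures automatically (the function
// name below is illustrative):
//
//     use std::io::Read;
//
//     fn read_all(r: &mut impl Read) -> Result<Vec<u8>, SocketError> {
//         let mut buf = Vec::new();
//         r.read_to_end(&mut buf)?; // IoError -> SocketError::Io
//         Ok(buf)
//     }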
| 24.133333 | 44 | 0.646409 |
f45c6c4c58066e2b173ffb4f3aa3c30b0d318ad8 | 125 | pub mod psp721;
pub use psp721::*;
pub mod extensions {
pub mod burnable;
pub mod metadata;
pub mod mintable;
}
| 13.888889 | 21 | 0.656 |
91b92d84a811fde2a3336ca6e4c2000bb1391264 | 33,504 | //! AST walker. Each overridden visit method has full control over what
//! happens with its node, it can do its own traversal of the node's children,
//! call `visit::walk_*` to apply the default traversal algorithm, or prevent
//! deeper traversal by doing nothing.
//!
//! Note: it is an important invariant that the default visitor walks the body
//! of a function in "execution order" (more concretely, reverse post-order
//! with respect to the CFG implied by the AST), meaning that if AST node A may
//! execute before AST node B, then A is visited first. The borrow checker in
//! particular relies on this property.
//!
//! Note: walking an AST before macro expansion is probably a bad idea. For
//! instance, a walker looking for item names in a module will miss all of
//! those that are created by the expansion of a macro.
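//!
//! A hedged usage sketch (`MyVisitor` stands in for any type implementing
//! the `Visitor` trait defined below):
//!
//! ```ignore
//! let mut visitor = MyVisitor::default();
//! visit::walk_crate(&mut visitor, &krate);
//! ```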
use crate::ast::*;
use crate::parse::token::Token;
use crate::tokenstream::{TokenTree, TokenStream};
use syntax_pos::Span;
#[derive(Copy, Clone)]
pub enum FnKind<'a> {
/// fn foo() or extern "Abi" fn foo()
ItemFn(Ident, &'a FnHeader, &'a Visibility, &'a Block),
/// fn foo(&self)
Method(Ident, &'a MethodSig, Option<&'a Visibility>, &'a Block),
/// |x, y| body
Closure(&'a Expr),
}
impl<'a> FnKind<'a> {
pub fn header(&self) -> Option<&'a FnHeader> {
match *self {
FnKind::ItemFn(_, header, _, _) => Some(header),
FnKind::Method(_, sig, _, _) => Some(&sig.header),
FnKind::Closure(_) => None,
}
}
}
/// Each method of the Visitor trait is a hook to be potentially
/// overridden. Each method's default implementation recursively visits
/// the substructure of the input via the corresponding `walk` method;
/// e.g., the `visit_mod` method by default calls `visit::walk_mod`.
///
/// If you want to ensure that your code handles every variant
/// explicitly, you need to override each method. (And you also need
/// to monitor future changes to `Visitor` in case a new method with a
/// new default implementation gets introduced.)
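///
/// A hedged sketch of overriding a single hook while keeping the default
/// traversal for everything else (`IdentCounter` is illustrative):
///
/// ```ignore
/// struct IdentCounter { count: usize }
///
/// impl<'ast> Visitor<'ast> for IdentCounter {
///     fn visit_ident(&mut self, ident: Ident) {
///         self.count += 1;
///         walk_ident(self, ident); // recurse into the substructure
///     }
/// }
/// ```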
pub trait Visitor<'ast>: Sized {
fn visit_name(&mut self, _span: Span, _name: Name) {
// Nothing to do.
}
fn visit_ident(&mut self, ident: Ident) {
walk_ident(self, ident);
}
fn visit_mod(&mut self, m: &'ast Mod, _s: Span, _attrs: &[Attribute], _n: NodeId) {
walk_mod(self, m);
}
fn visit_foreign_item(&mut self, i: &'ast ForeignItem) { walk_foreign_item(self, i) }
fn visit_global_asm(&mut self, ga: &'ast GlobalAsm) { walk_global_asm(self, ga) }
fn visit_item(&mut self, i: &'ast Item) { walk_item(self, i) }
fn visit_local(&mut self, l: &'ast Local) { walk_local(self, l) }
fn visit_block(&mut self, b: &'ast Block) { walk_block(self, b) }
fn visit_stmt(&mut self, s: &'ast Stmt) { walk_stmt(self, s) }
fn visit_arg(&mut self, arg: &'ast Arg) { walk_arg(self, arg) }
fn visit_arm(&mut self, a: &'ast Arm) { walk_arm(self, a) }
fn visit_pat(&mut self, p: &'ast Pat) { walk_pat(self, p) }
fn visit_anon_const(&mut self, c: &'ast AnonConst) { walk_anon_const(self, c) }
fn visit_expr(&mut self, ex: &'ast Expr) { walk_expr(self, ex) }
fn visit_expr_post(&mut self, _ex: &'ast Expr) { }
fn visit_ty(&mut self, t: &'ast Ty) { walk_ty(self, t) }
fn visit_generic_param(&mut self, param: &'ast GenericParam) {
walk_generic_param(self, param)
}
fn visit_generics(&mut self, g: &'ast Generics) { walk_generics(self, g) }
fn visit_where_predicate(&mut self, p: &'ast WherePredicate) {
walk_where_predicate(self, p)
}
fn visit_fn(&mut self, fk: FnKind<'ast>, fd: &'ast FnDecl, s: Span, _: NodeId) {
walk_fn(self, fk, fd, s)
}
fn visit_trait_item(&mut self, ti: &'ast TraitItem) { walk_trait_item(self, ti) }
fn visit_impl_item(&mut self, ii: &'ast ImplItem) { walk_impl_item(self, ii) }
fn visit_trait_ref(&mut self, t: &'ast TraitRef) { walk_trait_ref(self, t) }
fn visit_param_bound(&mut self, bounds: &'ast GenericBound) {
walk_param_bound(self, bounds)
}
fn visit_poly_trait_ref(&mut self, t: &'ast PolyTraitRef, m: &'ast TraitBoundModifier) {
walk_poly_trait_ref(self, t, m)
}
fn visit_variant_data(&mut self, s: &'ast VariantData, _: Ident,
_: &'ast Generics, _: NodeId, _: Span) {
walk_struct_def(self, s)
}
fn visit_struct_field(&mut self, s: &'ast StructField) { walk_struct_field(self, s) }
fn visit_enum_def(&mut self, enum_definition: &'ast EnumDef,
generics: &'ast Generics, item_id: NodeId, _: Span) {
walk_enum_def(self, enum_definition, generics, item_id)
}
fn visit_variant(&mut self, v: &'ast Variant, g: &'ast Generics, item_id: NodeId) {
walk_variant(self, v, g, item_id)
}
fn visit_label(&mut self, label: &'ast Label) {
walk_label(self, label)
}
fn visit_lifetime(&mut self, lifetime: &'ast Lifetime) {
walk_lifetime(self, lifetime)
}
fn visit_mac(&mut self, _mac: &'ast Mac) {
panic!("visit_mac disabled by default");
// N.B., see note about macros above.
// if you really want a visitor that
// works on macros, use this
// definition in your trait impl:
// visit::walk_mac(self, _mac)
}
fn visit_mac_def(&mut self, _mac: &'ast MacroDef, _id: NodeId) {
// Nothing to do
}
fn visit_path(&mut self, path: &'ast Path, _id: NodeId) {
walk_path(self, path)
}
fn visit_use_tree(&mut self, use_tree: &'ast UseTree, id: NodeId, _nested: bool) {
walk_use_tree(self, use_tree, id)
}
fn visit_path_segment(&mut self, path_span: Span, path_segment: &'ast PathSegment) {
walk_path_segment(self, path_span, path_segment)
}
fn visit_generic_args(&mut self, path_span: Span, generic_args: &'ast GenericArgs) {
walk_generic_args(self, path_span, generic_args)
}
fn visit_generic_arg(&mut self, generic_arg: &'ast GenericArg) {
match generic_arg {
GenericArg::Lifetime(lt) => self.visit_lifetime(lt),
GenericArg::Type(ty) => self.visit_ty(ty),
GenericArg::Const(ct) => self.visit_anon_const(ct),
}
}
fn visit_assoc_ty_constraint(&mut self, constraint: &'ast AssocTyConstraint) {
walk_assoc_ty_constraint(self, constraint)
}
fn visit_attribute(&mut self, attr: &'ast Attribute) {
walk_attribute(self, attr)
}
fn visit_tt(&mut self, tt: TokenTree) {
walk_tt(self, tt)
}
fn visit_tts(&mut self, tts: TokenStream) {
walk_tts(self, tts)
}
fn visit_token(&mut self, _t: Token) {}
// FIXME: add `visit_interpolated` and `walk_interpolated`
fn visit_vis(&mut self, vis: &'ast Visibility) {
walk_vis(self, vis)
}
fn visit_fn_ret_ty(&mut self, ret_ty: &'ast FunctionRetTy) {
walk_fn_ret_ty(self, ret_ty)
}
fn visit_fn_header(&mut self, _header: &'ast FnHeader) {
// Nothing to do
}
}
#[macro_export]
macro_rules! walk_list {
($visitor: expr, $method: ident, $list: expr) => {
for elem in $list {
$visitor.$method(elem)
}
};
($visitor: expr, $method: ident, $list: expr, $($extra_args: expr),*) => {
for elem in $list {
$visitor.$method(elem, $($extra_args,)*)
}
}
}
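// A hedged usage sketch of `walk_list!` (names are illustrative): each form
// expands to a `for` loop invoking the visitor method once per element.
//
//     walk_list!(visitor, visit_expr, &exprs);
//     walk_list!(visitor, visit_variant, &variants, generics, item_id);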
pub fn walk_ident<'a, V: Visitor<'a>>(visitor: &mut V, ident: Ident) {
visitor.visit_name(ident.span, ident.name);
}
pub fn walk_crate<'a, V: Visitor<'a>>(visitor: &mut V, krate: &'a Crate) {
visitor.visit_mod(&krate.module, krate.span, &krate.attrs, CRATE_NODE_ID);
walk_list!(visitor, visit_attribute, &krate.attrs);
}
pub fn walk_mod<'a, V: Visitor<'a>>(visitor: &mut V, module: &'a Mod) {
walk_list!(visitor, visit_item, &module.items);
}
pub fn walk_local<'a, V: Visitor<'a>>(visitor: &mut V, local: &'a Local) {
for attr in local.attrs.iter() {
visitor.visit_attribute(attr);
}
visitor.visit_pat(&local.pat);
walk_list!(visitor, visit_ty, &local.ty);
walk_list!(visitor, visit_expr, &local.init);
}
pub fn walk_label<'a, V: Visitor<'a>>(visitor: &mut V, label: &'a Label) {
visitor.visit_ident(label.ident);
}
pub fn walk_lifetime<'a, V: Visitor<'a>>(visitor: &mut V, lifetime: &'a Lifetime) {
visitor.visit_ident(lifetime.ident);
}
pub fn walk_poly_trait_ref<'a, V>(visitor: &mut V,
trait_ref: &'a PolyTraitRef,
_: &TraitBoundModifier)
where V: Visitor<'a>,
{
walk_list!(visitor, visit_generic_param, &trait_ref.bound_generic_params);
visitor.visit_trait_ref(&trait_ref.trait_ref);
}
pub fn walk_trait_ref<'a, V: Visitor<'a>>(visitor: &mut V, trait_ref: &'a TraitRef) {
visitor.visit_path(&trait_ref.path, trait_ref.ref_id)
}
pub fn walk_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a Item) {
visitor.visit_vis(&item.vis);
visitor.visit_ident(item.ident);
match item.node {
ItemKind::ExternCrate(orig_name) => {
if let Some(orig_name) = orig_name {
visitor.visit_name(item.span, orig_name);
}
}
ItemKind::Use(ref use_tree) => {
visitor.visit_use_tree(use_tree, item.id, false)
}
ItemKind::Static(ref typ, _, ref expr) |
ItemKind::Const(ref typ, ref expr) => {
visitor.visit_ty(typ);
visitor.visit_expr(expr);
}
ItemKind::Fn(ref declaration, ref header, ref generics, ref body) => {
visitor.visit_generics(generics);
visitor.visit_fn_header(header);
visitor.visit_fn(FnKind::ItemFn(item.ident, header,
&item.vis, body),
declaration,
item.span,
item.id)
}
ItemKind::Mod(ref module) => {
visitor.visit_mod(module, item.span, &item.attrs, item.id)
}
ItemKind::ForeignMod(ref foreign_module) => {
walk_list!(visitor, visit_foreign_item, &foreign_module.items);
}
ItemKind::GlobalAsm(ref ga) => visitor.visit_global_asm(ga),
ItemKind::TyAlias(ref typ, ref generics) => {
visitor.visit_ty(typ);
visitor.visit_generics(generics)
}
ItemKind::OpaqueTy(ref bounds, ref generics) => {
walk_list!(visitor, visit_param_bound, bounds);
visitor.visit_generics(generics)
}
ItemKind::Enum(ref enum_definition, ref generics) => {
visitor.visit_generics(generics);
visitor.visit_enum_def(enum_definition, generics, item.id, item.span)
}
ItemKind::Impl(_, _, _,
ref generics,
ref opt_trait_reference,
ref typ,
ref impl_items) => {
visitor.visit_generics(generics);
walk_list!(visitor, visit_trait_ref, opt_trait_reference);
visitor.visit_ty(typ);
walk_list!(visitor, visit_impl_item, impl_items);
}
ItemKind::Struct(ref struct_definition, ref generics) |
ItemKind::Union(ref struct_definition, ref generics) => {
visitor.visit_generics(generics);
visitor.visit_variant_data(struct_definition, item.ident,
generics, item.id, item.span);
}
ItemKind::Trait(.., ref generics, ref bounds, ref methods) => {
visitor.visit_generics(generics);
walk_list!(visitor, visit_param_bound, bounds);
walk_list!(visitor, visit_trait_item, methods);
}
ItemKind::TraitAlias(ref generics, ref bounds) => {
visitor.visit_generics(generics);
walk_list!(visitor, visit_param_bound, bounds);
}
ItemKind::Mac(ref mac) => visitor.visit_mac(mac),
ItemKind::MacroDef(ref ts) => visitor.visit_mac_def(ts, item.id),
}
walk_list!(visitor, visit_attribute, &item.attrs);
}
pub fn walk_enum_def<'a, V: Visitor<'a>>(visitor: &mut V,
enum_definition: &'a EnumDef,
generics: &'a Generics,
item_id: NodeId) {
walk_list!(visitor, visit_variant, &enum_definition.variants, generics, item_id);
}
pub fn walk_variant<'a, V>(visitor: &mut V,
variant: &'a Variant,
generics: &'a Generics,
item_id: NodeId)
where V: Visitor<'a>,
{
visitor.visit_ident(variant.ident);
visitor.visit_variant_data(&variant.data, variant.ident,
generics, item_id, variant.span);
walk_list!(visitor, visit_anon_const, &variant.disr_expr);
walk_list!(visitor, visit_attribute, &variant.attrs);
}
pub fn walk_ty<'a, V: Visitor<'a>>(visitor: &mut V, typ: &'a Ty) {
match typ.node {
TyKind::Slice(ref ty) | TyKind::Paren(ref ty) => {
visitor.visit_ty(ty)
}
TyKind::Ptr(ref mutable_type) => {
visitor.visit_ty(&mutable_type.ty)
}
TyKind::Rptr(ref opt_lifetime, ref mutable_type) => {
walk_list!(visitor, visit_lifetime, opt_lifetime);
visitor.visit_ty(&mutable_type.ty)
}
TyKind::Never | TyKind::CVarArgs => {}
TyKind::Tup(ref tuple_element_types) => {
walk_list!(visitor, visit_ty, tuple_element_types);
}
TyKind::BareFn(ref function_declaration) => {
walk_list!(visitor, visit_generic_param, &function_declaration.generic_params);
walk_fn_decl(visitor, &function_declaration.decl);
}
TyKind::Path(ref maybe_qself, ref path) => {
if let Some(ref qself) = *maybe_qself {
visitor.visit_ty(&qself.ty);
}
visitor.visit_path(path, typ.id);
}
TyKind::Array(ref ty, ref length) => {
visitor.visit_ty(ty);
visitor.visit_anon_const(length)
}
TyKind::TraitObject(ref bounds, ..) |
TyKind::ImplTrait(_, ref bounds) => {
walk_list!(visitor, visit_param_bound, bounds);
}
TyKind::Typeof(ref expression) => {
visitor.visit_anon_const(expression)
}
TyKind::Infer | TyKind::ImplicitSelf | TyKind::Err => {}
TyKind::Mac(ref mac) => {
visitor.visit_mac(mac)
}
}
}
pub fn walk_path<'a, V: Visitor<'a>>(visitor: &mut V, path: &'a Path) {
for segment in &path.segments {
visitor.visit_path_segment(path.span, segment);
}
}
pub fn walk_use_tree<'a, V: Visitor<'a>>(
visitor: &mut V, use_tree: &'a UseTree, id: NodeId,
) {
visitor.visit_path(&use_tree.prefix, id);
match use_tree.kind {
UseTreeKind::Simple(rename, ..) => {
// the extra IDs are handled during HIR lowering
if let Some(rename) = rename {
visitor.visit_ident(rename);
}
}
UseTreeKind::Glob => {},
UseTreeKind::Nested(ref use_trees) => {
for &(ref nested_tree, nested_id) in use_trees {
visitor.visit_use_tree(nested_tree, nested_id, true);
}
}
}
}
pub fn walk_path_segment<'a, V: Visitor<'a>>(visitor: &mut V,
path_span: Span,
segment: &'a PathSegment) {
visitor.visit_ident(segment.ident);
if let Some(ref args) = segment.args {
visitor.visit_generic_args(path_span, args);
}
}
pub fn walk_generic_args<'a, V>(visitor: &mut V,
_path_span: Span,
generic_args: &'a GenericArgs)
where V: Visitor<'a>,
{
match *generic_args {
GenericArgs::AngleBracketed(ref data) => {
walk_list!(visitor, visit_generic_arg, &data.args);
walk_list!(visitor, visit_assoc_ty_constraint, &data.constraints);
}
GenericArgs::Parenthesized(ref data) => {
walk_list!(visitor, visit_ty, &data.inputs);
walk_list!(visitor, visit_ty, &data.output);
}
}
}
pub fn walk_assoc_ty_constraint<'a, V: Visitor<'a>>(visitor: &mut V,
constraint: &'a AssocTyConstraint) {
visitor.visit_ident(constraint.ident);
match constraint.kind {
AssocTyConstraintKind::Equality { ref ty } => {
visitor.visit_ty(ty);
}
AssocTyConstraintKind::Bound { ref bounds } => {
walk_list!(visitor, visit_param_bound, bounds);
}
}
}
pub fn walk_pat<'a, V: Visitor<'a>>(visitor: &mut V, pattern: &'a Pat) {
match pattern.node {
PatKind::TupleStruct(ref path, ref elems) => {
visitor.visit_path(path, pattern.id);
walk_list!(visitor, visit_pat, elems);
}
PatKind::Path(ref opt_qself, ref path) => {
if let Some(ref qself) = *opt_qself {
visitor.visit_ty(&qself.ty);
}
visitor.visit_path(path, pattern.id)
}
PatKind::Struct(ref path, ref fields, _) => {
visitor.visit_path(path, pattern.id);
for field in fields {
walk_list!(visitor, visit_attribute, field.attrs.iter());
visitor.visit_ident(field.ident);
visitor.visit_pat(&field.pat)
}
}
PatKind::Box(ref subpattern) |
PatKind::Ref(ref subpattern, _) |
PatKind::Paren(ref subpattern) => {
visitor.visit_pat(subpattern)
}
PatKind::Ident(_, ident, ref optional_subpattern) => {
visitor.visit_ident(ident);
walk_list!(visitor, visit_pat, optional_subpattern);
}
PatKind::Lit(ref expression) => visitor.visit_expr(expression),
PatKind::Range(ref lower_bound, ref upper_bound, _) => {
visitor.visit_expr(lower_bound);
visitor.visit_expr(upper_bound);
}
PatKind::Wild | PatKind::Rest => {},
PatKind::Tuple(ref elems)
| PatKind::Slice(ref elems)
| PatKind::Or(ref elems) => {
walk_list!(visitor, visit_pat, elems);
}
PatKind::Mac(ref mac) => visitor.visit_mac(mac),
}
}
pub fn walk_foreign_item<'a, V: Visitor<'a>>(visitor: &mut V, foreign_item: &'a ForeignItem) {
visitor.visit_vis(&foreign_item.vis);
visitor.visit_ident(foreign_item.ident);
match foreign_item.node {
ForeignItemKind::Fn(ref function_declaration, ref generics) => {
walk_fn_decl(visitor, function_declaration);
visitor.visit_generics(generics)
}
ForeignItemKind::Static(ref typ, _) => visitor.visit_ty(typ),
ForeignItemKind::Ty => (),
ForeignItemKind::Macro(ref mac) => visitor.visit_mac(mac),
}
walk_list!(visitor, visit_attribute, &foreign_item.attrs);
}
pub fn walk_global_asm<'a, V: Visitor<'a>>(_: &mut V, _: &'a GlobalAsm) {
// Empty!
}
pub fn walk_param_bound<'a, V: Visitor<'a>>(visitor: &mut V, bound: &'a GenericBound) {
match *bound {
GenericBound::Trait(ref typ, ref modifier) => visitor.visit_poly_trait_ref(typ, modifier),
GenericBound::Outlives(ref lifetime) => visitor.visit_lifetime(lifetime),
}
}
pub fn walk_generic_param<'a, V: Visitor<'a>>(visitor: &mut V, param: &'a GenericParam) {
visitor.visit_ident(param.ident);
walk_list!(visitor, visit_attribute, param.attrs.iter());
walk_list!(visitor, visit_param_bound, ¶m.bounds);
match param.kind {
GenericParamKind::Lifetime => (),
GenericParamKind::Type { ref default } => walk_list!(visitor, visit_ty, default),
GenericParamKind::Const { ref ty, .. } => visitor.visit_ty(ty),
}
}
pub fn walk_generics<'a, V: Visitor<'a>>(visitor: &mut V, generics: &'a Generics) {
walk_list!(visitor, visit_generic_param, &generics.params);
walk_list!(visitor, visit_where_predicate, &generics.where_clause.predicates);
}
pub fn walk_where_predicate<'a, V: Visitor<'a>>(visitor: &mut V, predicate: &'a WherePredicate) {
match *predicate {
WherePredicate::BoundPredicate(WhereBoundPredicate{ref bounded_ty,
ref bounds,
ref bound_generic_params,
..}) => {
visitor.visit_ty(bounded_ty);
walk_list!(visitor, visit_param_bound, bounds);
walk_list!(visitor, visit_generic_param, bound_generic_params);
}
WherePredicate::RegionPredicate(WhereRegionPredicate{ref lifetime,
ref bounds,
..}) => {
visitor.visit_lifetime(lifetime);
walk_list!(visitor, visit_param_bound, bounds);
}
WherePredicate::EqPredicate(WhereEqPredicate{ref lhs_ty,
ref rhs_ty,
..}) => {
visitor.visit_ty(lhs_ty);
visitor.visit_ty(rhs_ty);
}
}
}
pub fn walk_fn_ret_ty<'a, V: Visitor<'a>>(visitor: &mut V, ret_ty: &'a FunctionRetTy) {
if let FunctionRetTy::Ty(ref output_ty) = *ret_ty {
visitor.visit_ty(output_ty)
}
}
pub fn walk_fn_decl<'a, V: Visitor<'a>>(visitor: &mut V, function_declaration: &'a FnDecl) {
for arg in &function_declaration.inputs {
visitor.visit_arg(arg);
}
visitor.visit_fn_ret_ty(&function_declaration.output);
}
pub fn walk_fn<'a, V>(visitor: &mut V, kind: FnKind<'a>, declaration: &'a FnDecl, _span: Span)
where V: Visitor<'a>,
{
match kind {
FnKind::ItemFn(_, header, _, body) => {
visitor.visit_fn_header(header);
walk_fn_decl(visitor, declaration);
visitor.visit_block(body);
}
FnKind::Method(_, sig, _, body) => {
visitor.visit_fn_header(&sig.header);
walk_fn_decl(visitor, declaration);
visitor.visit_block(body);
}
FnKind::Closure(body) => {
walk_fn_decl(visitor, declaration);
visitor.visit_expr(body);
}
}
}
pub fn walk_trait_item<'a, V: Visitor<'a>>(visitor: &mut V, trait_item: &'a TraitItem) {
visitor.visit_ident(trait_item.ident);
walk_list!(visitor, visit_attribute, &trait_item.attrs);
visitor.visit_generics(&trait_item.generics);
match trait_item.node {
TraitItemKind::Const(ref ty, ref default) => {
visitor.visit_ty(ty);
walk_list!(visitor, visit_expr, default);
}
TraitItemKind::Method(ref sig, None) => {
visitor.visit_fn_header(&sig.header);
walk_fn_decl(visitor, &sig.decl);
}
TraitItemKind::Method(ref sig, Some(ref body)) => {
visitor.visit_fn(FnKind::Method(trait_item.ident, sig, None, body),
&sig.decl, trait_item.span, trait_item.id);
}
TraitItemKind::Type(ref bounds, ref default) => {
walk_list!(visitor, visit_param_bound, bounds);
walk_list!(visitor, visit_ty, default);
}
TraitItemKind::Macro(ref mac) => {
visitor.visit_mac(mac);
}
}
}
pub fn walk_impl_item<'a, V: Visitor<'a>>(visitor: &mut V, impl_item: &'a ImplItem) {
visitor.visit_vis(&impl_item.vis);
visitor.visit_ident(impl_item.ident);
walk_list!(visitor, visit_attribute, &impl_item.attrs);
visitor.visit_generics(&impl_item.generics);
match impl_item.node {
ImplItemKind::Const(ref ty, ref expr) => {
visitor.visit_ty(ty);
visitor.visit_expr(expr);
}
ImplItemKind::Method(ref sig, ref body) => {
visitor.visit_fn(FnKind::Method(impl_item.ident, sig, Some(&impl_item.vis), body),
&sig.decl, impl_item.span, impl_item.id);
}
ImplItemKind::TyAlias(ref ty) => {
visitor.visit_ty(ty);
}
ImplItemKind::OpaqueTy(ref bounds) => {
walk_list!(visitor, visit_param_bound, bounds);
}
ImplItemKind::Macro(ref mac) => {
visitor.visit_mac(mac);
}
}
}
pub fn walk_struct_def<'a, V: Visitor<'a>>(visitor: &mut V, struct_definition: &'a VariantData) {
walk_list!(visitor, visit_struct_field, struct_definition.fields());
}
pub fn walk_struct_field<'a, V: Visitor<'a>>(visitor: &mut V, struct_field: &'a StructField) {
visitor.visit_vis(&struct_field.vis);
if let Some(ident) = struct_field.ident {
visitor.visit_ident(ident);
}
visitor.visit_ty(&struct_field.ty);
walk_list!(visitor, visit_attribute, &struct_field.attrs);
}
pub fn walk_block<'a, V: Visitor<'a>>(visitor: &mut V, block: &'a Block) {
walk_list!(visitor, visit_stmt, &block.stmts);
}
pub fn walk_stmt<'a, V: Visitor<'a>>(visitor: &mut V, statement: &'a Stmt) {
match statement.node {
StmtKind::Local(ref local) => visitor.visit_local(local),
StmtKind::Item(ref item) => visitor.visit_item(item),
StmtKind::Expr(ref expression) | StmtKind::Semi(ref expression) => {
visitor.visit_expr(expression)
}
StmtKind::Mac(ref mac) => {
let (ref mac, _, ref attrs) = **mac;
visitor.visit_mac(mac);
for attr in attrs.iter() {
visitor.visit_attribute(attr);
}
}
}
}
pub fn walk_mac<'a, V: Visitor<'a>>(visitor: &mut V, mac: &'a Mac) {
visitor.visit_path(&mac.path, DUMMY_NODE_ID);
}
pub fn walk_anon_const<'a, V: Visitor<'a>>(visitor: &mut V, constant: &'a AnonConst) {
visitor.visit_expr(&constant.value);
}
pub fn walk_expr<'a, V: Visitor<'a>>(visitor: &mut V, expression: &'a Expr) {
for attr in expression.attrs.iter() {
visitor.visit_attribute(attr);
}
match expression.node {
ExprKind::Box(ref subexpression) => {
visitor.visit_expr(subexpression)
}
ExprKind::Array(ref subexpressions) => {
walk_list!(visitor, visit_expr, subexpressions);
}
ExprKind::Repeat(ref element, ref count) => {
visitor.visit_expr(element);
visitor.visit_anon_const(count)
}
ExprKind::Struct(ref path, ref fields, ref optional_base) => {
visitor.visit_path(path, expression.id);
for field in fields {
walk_list!(visitor, visit_attribute, field.attrs.iter());
visitor.visit_ident(field.ident);
visitor.visit_expr(&field.expr)
}
walk_list!(visitor, visit_expr, optional_base);
}
ExprKind::Tup(ref subexpressions) => {
walk_list!(visitor, visit_expr, subexpressions);
}
ExprKind::Call(ref callee_expression, ref arguments) => {
visitor.visit_expr(callee_expression);
walk_list!(visitor, visit_expr, arguments);
}
ExprKind::MethodCall(ref segment, ref arguments) => {
visitor.visit_path_segment(expression.span, segment);
walk_list!(visitor, visit_expr, arguments);
}
ExprKind::Binary(_, ref left_expression, ref right_expression) => {
visitor.visit_expr(left_expression);
visitor.visit_expr(right_expression)
}
ExprKind::AddrOf(_, ref subexpression) | ExprKind::Unary(_, ref subexpression) => {
visitor.visit_expr(subexpression)
}
ExprKind::Cast(ref subexpression, ref typ) | ExprKind::Type(ref subexpression, ref typ) => {
visitor.visit_expr(subexpression);
visitor.visit_ty(typ)
}
ExprKind::Let(ref pats, ref scrutinee) => {
walk_list!(visitor, visit_pat, pats);
visitor.visit_expr(scrutinee);
}
ExprKind::If(ref head_expression, ref if_block, ref optional_else) => {
visitor.visit_expr(head_expression);
visitor.visit_block(if_block);
walk_list!(visitor, visit_expr, optional_else);
}
ExprKind::While(ref subexpression, ref block, ref opt_label) => {
walk_list!(visitor, visit_label, opt_label);
visitor.visit_expr(subexpression);
visitor.visit_block(block);
}
ExprKind::ForLoop(ref pattern, ref subexpression, ref block, ref opt_label) => {
walk_list!(visitor, visit_label, opt_label);
visitor.visit_pat(pattern);
visitor.visit_expr(subexpression);
visitor.visit_block(block);
}
ExprKind::Loop(ref block, ref opt_label) => {
walk_list!(visitor, visit_label, opt_label);
visitor.visit_block(block);
}
ExprKind::Match(ref subexpression, ref arms) => {
visitor.visit_expr(subexpression);
walk_list!(visitor, visit_arm, arms);
}
ExprKind::Closure(_, _, _, ref function_declaration, ref body, _decl_span) => {
visitor.visit_fn(FnKind::Closure(body),
function_declaration,
expression.span,
expression.id)
}
ExprKind::Block(ref block, ref opt_label) => {
walk_list!(visitor, visit_label, opt_label);
visitor.visit_block(block);
}
ExprKind::Async(_, _, ref body) => {
visitor.visit_block(body);
}
ExprKind::Await(ref expr) => visitor.visit_expr(expr),
ExprKind::Assign(ref left_hand_expression, ref right_hand_expression) => {
visitor.visit_expr(left_hand_expression);
visitor.visit_expr(right_hand_expression);
}
ExprKind::AssignOp(_, ref left_expression, ref right_expression) => {
visitor.visit_expr(left_expression);
visitor.visit_expr(right_expression);
}
ExprKind::Field(ref subexpression, ident) => {
visitor.visit_expr(subexpression);
visitor.visit_ident(ident);
}
ExprKind::Index(ref main_expression, ref index_expression) => {
visitor.visit_expr(main_expression);
visitor.visit_expr(index_expression)
}
ExprKind::Range(ref start, ref end, _) => {
walk_list!(visitor, visit_expr, start);
walk_list!(visitor, visit_expr, end);
}
ExprKind::Path(ref maybe_qself, ref path) => {
if let Some(ref qself) = *maybe_qself {
visitor.visit_ty(&qself.ty);
}
visitor.visit_path(path, expression.id)
}
ExprKind::Break(ref opt_label, ref opt_expr) => {
walk_list!(visitor, visit_label, opt_label);
walk_list!(visitor, visit_expr, opt_expr);
}
ExprKind::Continue(ref opt_label) => {
walk_list!(visitor, visit_label, opt_label);
}
ExprKind::Ret(ref optional_expression) => {
walk_list!(visitor, visit_expr, optional_expression);
}
ExprKind::Mac(ref mac) => visitor.visit_mac(mac),
ExprKind::Paren(ref subexpression) => {
visitor.visit_expr(subexpression)
}
ExprKind::InlineAsm(ref ia) => {
for &(_, ref input) in &ia.inputs {
visitor.visit_expr(input)
}
for output in &ia.outputs {
visitor.visit_expr(&output.expr)
}
}
ExprKind::Yield(ref optional_expression) => {
walk_list!(visitor, visit_expr, optional_expression);
}
ExprKind::Try(ref subexpression) => {
visitor.visit_expr(subexpression)
}
ExprKind::TryBlock(ref body) => {
visitor.visit_block(body)
}
ExprKind::Lit(_) | ExprKind::Err => {}
}
visitor.visit_expr_post(expression)
}
pub fn walk_arg<'a, V: Visitor<'a>>(visitor: &mut V, arg: &'a Arg) {
walk_list!(visitor, visit_attribute, arg.attrs.iter());
visitor.visit_pat(&arg.pat);
visitor.visit_ty(&arg.ty);
}
pub fn walk_arm<'a, V: Visitor<'a>>(visitor: &mut V, arm: &'a Arm) {
walk_list!(visitor, visit_pat, &arm.pats);
if let Some(ref e) = &arm.guard {
visitor.visit_expr(e);
}
visitor.visit_expr(&arm.body);
walk_list!(visitor, visit_attribute, &arm.attrs);
}
pub fn walk_vis<'a, V: Visitor<'a>>(visitor: &mut V, vis: &'a Visibility) {
if let VisibilityKind::Restricted { ref path, id } = vis.node {
visitor.visit_path(path, id);
}
}
pub fn walk_attribute<'a, V: Visitor<'a>>(visitor: &mut V, attr: &'a Attribute) {
visitor.visit_tts(attr.tokens.clone());
}
pub fn walk_tt<'a, V: Visitor<'a>>(visitor: &mut V, tt: TokenTree) {
match tt {
TokenTree::Token(token) => visitor.visit_token(token),
TokenTree::Delimited(_, _, tts) => visitor.visit_tts(tts),
}
}
pub fn walk_tts<'a, V: Visitor<'a>>(visitor: &mut V, tts: TokenStream) {
for tt in tts.trees() {
visitor.visit_tt(tt);
}
}
| 38.95814 | 100 | 0.590347 |
f55a151b810bbbc933754817538fc0f1535a4c72 | 22,486 | extern crate http;
extern crate s3;
use std::str;
use s3::bucket::Bucket;
use s3::creds::Credentials;
use s3::error::S3Error;
use s3::region::Region;
use crate::address::ParsedAddress;
use crate::bar::WrappedBar;
use crate::consts::*;
use crate::error::HTTPHeaderError;
use crate::error::ValidateError;
use crate::hash::HashChecker;
use crate::question::*;
use crate::tls::*;
struct Storage {
_name: String,
region: Region,
credentials: Credentials,
_bucket: String,
_location_supported: bool,
}
pub struct S3;
impl S3 {
pub async fn get(
input: &str,
output: &str,
bar: &mut WrappedBar,
expected_sha256: &str,
) -> Result<(), ValidateError> {
S3::_get(input, output, bar).await.unwrap();
HashChecker::check(output, expected_sha256)
}
pub async fn put(input: &str, output: &str, bar: WrappedBar) -> Result<(), ValidateError> {
let (output, bucket) = S3::setup(output, bar.silent).await;
        let mut async_input_file = tokio::fs::File::open(input) // TODO: once the s3 provider crate supports streams implementing futures_core::stream::Stream (used for resume), switch to io.rs::get_output() instead.
.await
.expect("Unable to open input file");
let _ = bucket
.put_object_stream(&mut async_input_file, output)
.await
.unwrap();
Ok(())
}
async fn _get(input: &str, output: &str, bar: &mut WrappedBar) -> Result<(), HTTPHeaderError> {
let (input, bucket) = S3::setup(input, bar.silent).await;
        let mut async_output_file = tokio::fs::File::create(output) // TODO: once the s3 provider crate supports streams implementing futures_core::stream::Stream (used for resume), switch to io.rs::get_output() instead.
.await
.expect("Unable to open output file");
let _ = bucket
.get_object_stream(input, &mut async_output_file)
.await
.unwrap();
Ok(())
}
async fn setup(io: &str, silent: bool) -> (String, s3::bucket::Bucket) {
let parsed_address = ParsedAddress::parse_address(io, silent);
let io = S3::get_path_in_bucket(&parsed_address);
let bucket = S3::get_bucket(&parsed_address);
let transport = S3::_get_transport::<TLS, QuestionWrapped>(&parsed_address.server);
let fqdn = transport.to_string() + &parsed_address.server;
let bucket_kind = S3::_get_header(&fqdn, HTTP_HEADER_SERVER).await.unwrap();
let (username, password) = S3::get_credentials(&parsed_address, silent);
let backend = S3::new(&bucket_kind, &username, &password, &bucket, &fqdn);
let bucket = Bucket::new(bucket, backend.region, backend.credentials)
.unwrap()
.with_path_style();
(io, bucket)
}
fn get_credentials(parsed_address: &ParsedAddress, silent: bool) -> (String, String) {
let result = (
parsed_address.username.to_owned(),
parsed_address.password.to_owned(),
);
let result = S3::mixin_aws_credentials_from_aws_folder(result.0, result.1, silent);
let result = S3::mixin_aws_credentials_from_env(result.0, result.1, silent);
(result.0, result.1)
}
fn mixin_aws_credentials_from_aws_folder(
username: String,
password: String,
silent: bool,
) -> (String, String) {
let mut result = (username, password);
if let Ok(creds_from_profile) = Credentials::from_profile(None) {
if !silent {
println!("🔑 Parsed AWS credentials from ~/.aws/credentials.");
}
result = (
creds_from_profile.access_key.unwrap(),
creds_from_profile.secret_key.unwrap(),
);
}
return result;
}
fn mixin_aws_credentials_from_env(
username: String,
password: String,
silent: bool,
) -> (String, String) {
let mut result = (username, password);
if let Ok(creds_from_profile) = Credentials::from_env() {
if !silent {
println!("🔑 Parsed AWS credentials from environment vars AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.");
}
result = (
creds_from_profile.access_key.unwrap(),
creds_from_profile.secret_key.unwrap(),
);
}
return result;
}
fn get_path_in_bucket(parsed_address: &ParsedAddress) -> String {
let mut result = "/".to_string();
if parsed_address.path_segments.len() > 1 {
result += &parsed_address.path_segments[1..].join("/");
result += "/";
}
result += &parsed_address.file;
return result;
}
fn get_bucket(parsed_address: &ParsedAddress) -> &str {
let bucket: &str = match parsed_address.path_segments.len() {
0 => &parsed_address.file,
_ => &parsed_address.path_segments[0],
};
bucket
}
async fn _list(bucket: &Bucket) -> Result<(), S3Error> {
let buckets = bucket.list("".to_string(), None).await?;
for bucket in buckets {
for content in bucket.contents {
println!("{}", content.key);
}
}
Ok(())
}
async fn _get_header(server: &str, header: &str) -> Result<String, HTTPHeaderError> {
let client = reqwest::Client::new();
let res = client.post(server).send().await.unwrap();
let result = res
.headers()
.get(header)
.ok_or(HTTPHeaderError::NotPresent)?;
Ok(result.to_str().unwrap().to_lowercase().to_string())
}
fn _get_transport<T: TLSTrait, Q: QuestionTrait>(server: &str) -> &str {
let parts: Vec<&str> = server.split(":").collect();
assert_eq!(parts.len(), 2, "No port in URL. Stopping.");
let host = parts[0];
let port = parts[1];
        if T::has_tls(host, port) {
            "https://"
        } else if Q::yes_no() {
            "http://"
        } else {
            ""
        }
}
async fn _put_string(
bucket: &Bucket,
destination_file: &str,
string: &str,
) -> Result<(), S3Error> {
let (_, _) = bucket.delete_object(destination_file).await?;
let (_, _) = bucket
.put_object(destination_file, string.as_bytes())
.await?;
Ok(())
}
async fn _get_string(bucket: &Bucket, source_file: &str) -> Result<String, S3Error> {
let (data, _) = bucket.get_object(source_file).await?;
let string = str::from_utf8(&data)?;
Ok(string.to_string())
}
fn new(
kind: &str,
access_key: &str,
secret_key: &str,
bucket: &str,
endpoint: &str,
) -> Storage {
let storage = match kind {
"minio" => Storage {
_name: "minio".into(),
region: Region::Custom {
region: "".into(),
endpoint: endpoint.into(),
},
credentials: Credentials {
access_key: Some(access_key.to_owned()),
secret_key: Some(secret_key.to_owned()),
security_token: None,
session_token: None,
},
_bucket: bucket.to_string(),
_location_supported: false,
},
"aws" => Storage {
_name: "aws".into(),
region: "eu-central-1".parse().unwrap(),
credentials: Credentials {
access_key: Some(access_key.to_owned()),
secret_key: Some(secret_key.to_owned()),
security_token: None,
session_token: None,
},
_bucket: bucket.to_string(),
_location_supported: true,
},
_ => Storage {
_name: "".into(),
region: "".parse().unwrap(),
credentials: Credentials {
access_key: Some(access_key.to_owned()),
secret_key: Some(secret_key.to_owned()),
security_token: None,
session_token: None,
},
_bucket: bucket.to_string(),
_location_supported: false,
},
};
return storage;
}
}
#[cfg(test)]
mod tests {
use super::*;
use serial_test::serial;
fn just_start(justfile: &str) {
use std::env;
use std::io::{self, Write};
use std::process::Command;
let output = Command::new("just")
.args([
"--justfile",
justfile,
"_start",
env::current_dir().unwrap().to_str().unwrap(),
])
.output()
.expect("failed to just _start");
println!("status: {}", output.status);
io::stdout().write_all(&output.stdout).unwrap();
io::stderr().write_all(&output.stderr).unwrap();
}
fn just_stop(justfile: &str) {
use std::env;
use std::process::Command;
let _ = Command::new("just")
.args([
"--justfile",
justfile,
"_stop",
env::current_dir().unwrap().to_str().unwrap(),
])
.output();
}
#[tokio::test]
#[serial]
async fn test_list_bucket_works_when_typical() {
just_start("test/s3/Justfile");
let parsed_address = ParsedAddress {
server: "localhost:9000".to_string(),
username: "minioadmin".to_string(),
password: "minioadmin".to_string(),
path_segments: vec!["test-bucket".to_string()],
file: "".to_string(),
};
let bucket = S3::get_bucket(&parsed_address);
let transport = S3::_get_transport::<TLS, QuestionWrapped>(&parsed_address.server);
let fqdn = transport.to_string() + &parsed_address.server;
let bucket_kind = S3::_get_header(&fqdn, HTTP_HEADER_SERVER).await.unwrap();
let backend = S3::new(
&bucket_kind,
&parsed_address.username,
&parsed_address.password,
&bucket,
&fqdn,
);
let bucket = Bucket::new(bucket, backend.region, backend.credentials)
.unwrap()
.with_path_style();
assert!(S3::_list(&bucket).await.is_ok());
just_stop("test/s3/Justfile");
}
#[tokio::test]
#[serial]
async fn test_put_string_works_when_typical() {
just_start("test/s3/Justfile");
let parsed_address = ParsedAddress {
server: "localhost:9000".to_string(),
username: "minioadmin".to_string(),
password: "minioadmin".to_string(),
path_segments: vec!["test-bucket".to_string()],
file: "".to_string(),
};
let bucket = S3::get_bucket(&parsed_address);
let transport = S3::_get_transport::<TLS, QuestionWrapped>(&parsed_address.server);
let fqdn = transport.to_string() + &parsed_address.server;
let bucket_kind = S3::_get_header(&fqdn, HTTP_HEADER_SERVER).await.unwrap();
let backend = S3::new(
&bucket_kind,
&parsed_address.username,
&parsed_address.password,
&bucket,
&fqdn,
);
let bucket = Bucket::new(bucket, backend.region, backend.credentials)
.unwrap()
.with_path_style();
assert!(S3::_put_string(
&bucket,
"test_put_string_works_when_typical",
"This is the string from test_put_string_works_when_typical."
)
.await
.is_ok());
just_stop("test/s3/Justfile");
}
#[tokio::test]
#[serial]
async fn test_get_string_works_when_typical() {
just_start("test/s3/Justfile");
let parsed_address = ParsedAddress {
server: "localhost:9000".to_string(),
username: "minioadmin".to_string(),
password: "minioadmin".to_string(),
path_segments: vec!["test-bucket".to_string()],
file: "".to_string(),
};
let bucket = S3::get_bucket(&parsed_address);
let transport = S3::_get_transport::<TLS, QuestionWrapped>(&parsed_address.server);
let fqdn = transport.to_string() + &parsed_address.server;
let bucket_kind = S3::_get_header(&fqdn, HTTP_HEADER_SERVER).await.unwrap();
let backend = S3::new(
&bucket_kind,
&parsed_address.username,
&parsed_address.password,
&bucket,
&fqdn,
);
let bucket = Bucket::new(bucket, backend.region, backend.credentials)
.unwrap()
.with_path_style();
let _ = S3::_put_string(
&bucket,
"test_put_string_works_when_typical",
"This is the string from test_put_string_works_when_typical.",
)
.await
.is_ok();
assert_eq!(
S3::_get_string(&bucket, "test_put_string_works_when_typical")
.await
.unwrap(),
"This is the string from test_put_string_works_when_typical."
);
just_stop("test/s3/Justfile");
}
}
#[test]
fn test_get_bucket_works_when_typical() {
let parsed_address = ParsedAddress {
server: "".to_string(),
username: "".to_string(),
password: "".to_string(),
path_segments: vec!["test-bucket".to_string()],
file: "".to_string(),
};
assert_eq!(S3::get_bucket(&parsed_address), "test-bucket");
}
#[test]
fn test_get_bucket_works_when_multiple_segments() {
let parsed_address = ParsedAddress {
server: "".to_string(),
username: "".to_string(),
password: "".to_string(),
path_segments: vec!["test-bucket".to_string(), "test-file".to_string()],
file: "".to_string(),
};
assert_eq!(S3::get_bucket(&parsed_address), "test-bucket");
}
#[test]
fn test_get_transport_returns_http_transport_when_no_tls() {
use crate::question::*;
pub struct TlsMockNoTLS;
impl TLSTrait for TlsMockNoTLS {
fn has_tls(_host: &str, _port: &str) -> bool {
false
}
}
assert_eq!(
S3::_get_transport::<TlsMockNoTLS, QuestionWrapped>("dummyhost:9000"),
"http://"
);
}
#[test]
fn test_get_transport_returns_https_transport_when_has_tls() {
use crate::question::*;
pub struct TlsMockHasTLS;
impl TLSTrait for TlsMockHasTLS {
fn has_tls(_host: &str, _port: &str) -> bool {
true
}
}
assert_eq!(
S3::_get_transport::<TlsMockHasTLS, QuestionWrapped>("dummyhost:9000"),
"https://"
);
}
#[test]
fn test_get_transport_returns_no_transport_when_no_tls() {
use crate::question::*;
pub struct TlsMockHasTLS;
impl TLSTrait for TlsMockHasTLS {
fn has_tls(_host: &str, _port: &str) -> bool {
false
}
}
struct QuestionWrappedMock;
impl QuestionTrait for QuestionWrappedMock {
fn yes_no() -> bool {
false
}
}
assert_eq!(
S3::_get_transport::<TlsMockHasTLS, QuestionWrappedMock>("dummyhost:9000"),
""
);
}
#[should_panic]
#[tokio::test]
async fn test_get_transport_bucket_panics_when_no_port() {
let parsed_address = ParsedAddress {
server: "localhost".to_string(),
username: "".to_string(),
password: "".to_string(),
path_segments: vec!["test-bucket".to_string()],
file: "".to_string(),
};
let _ = S3::_get_transport::<TLS, QuestionWrapped>(&parsed_address.server);
}
#[test]
fn test_storage_new_minio() {
let storage = S3::new("minio", "user", "pass", "bucket", "fqdn");
assert_eq!(storage._location_supported, false);
}
#[test]
fn test_storage_new_aws() {
let storage = S3::new("aws", "user", "pass", "bucket", "fqdn");
assert_eq!(storage._location_supported, true);
}
#[test]
fn test_storage_new_default() {
let storage = S3::new("unknown", "user", "pass", "bucket", "fqdn");
assert_eq!(storage._location_supported, false);
}
#[test]
fn test_get_path_in_bucket_works_when_typical() {
let parsed_address = ParsedAddress {
server: "".to_string(),
username: "".to_string(),
password: "".to_string(),
path_segments: vec!["test-bucket".to_string()],
file: "test-file".to_string(),
};
let path = S3::get_path_in_bucket(&parsed_address);
assert_eq!(path, "/test-file");
}
#[test]
fn test_get_path_in_bucket_works_when_full_url() {
let parsed_address = ParsedAddress::parse_address(
"s3://minioadmin:minioadmin@localhost:9000/test-bucket/test.file",
true,
);
let path = S3::get_path_in_bucket(&parsed_address);
assert_eq!(path, "/test.file");
}
#[test]
fn test_get_path_in_bucket_works_when_in_subfolder() {
let parsed_address = ParsedAddress::parse_address(
"s3://minioadmin:minioadmin@localhost:9000/test-bucket/subfolder/test.file",
true,
);
let path = S3::get_path_in_bucket(&parsed_address);
assert_eq!(path, "/subfolder/test.file");
}
#[cfg(test)]
mod test_mixins {
use super::*;
use serial_test::serial;
#[test]
#[serial]
fn test_mixin_aws_credentials_from_aws_folder_works_when_typical() {
use crate::untildify::untildify;
use std::fs::OpenOptions;
use std::io::Write;
let _ = std::fs::rename(untildify("~/.aws"), untildify("~/.aws_aim_testing"));
std::fs::create_dir(untildify("~/.aws")).unwrap();
let mut file = OpenOptions::new()
.create(true)
.write(true)
.append(true)
.open(untildify("~/.aws/credentials"))
.unwrap();
file.write_all(b"[default]\n").unwrap();
file.write_all(b"aws_access_key_id = credentials_user\n")
.unwrap();
file.write_all(b"aws_secret_access_key = credentials_pass")
.unwrap();
let (username, password) =
S3::mixin_aws_credentials_from_aws_folder("".to_string(), "".to_string(), true);
std::fs::remove_dir_all(untildify("~/.aws")).unwrap();
let _ = std::fs::rename(untildify("~/.aws_aim_testing"), untildify("~/.aws"));
assert_eq!(
(username, password),
(
"credentials_user".to_string(),
"credentials_pass".to_string()
)
);
}
#[test]
#[serial]
fn test_mixin_aws_credentials_from_aws_folder_works_when_typical_and_not_silent() {
use crate::untildify::untildify;
use std::fs::OpenOptions;
use std::io::Write;
let _ = std::fs::rename(untildify("~/.aws"), untildify("~/.aws_aim_testing"));
std::fs::create_dir(untildify("~/.aws")).unwrap();
let mut file = OpenOptions::new()
.create(true)
.write(true)
.append(true)
.open(untildify("~/.aws/credentials"))
.unwrap();
file.write_all(b"[default]\n").unwrap();
file.write_all(b"aws_access_key_id = credentials_user\n")
.unwrap();
file.write_all(b"aws_secret_access_key = credentials_pass")
.unwrap();
let (username, password) =
S3::mixin_aws_credentials_from_aws_folder("".to_string(), "".to_string(), false);
std::fs::remove_dir_all(untildify("~/.aws")).unwrap();
let _ = std::fs::rename(untildify("~/.aws_aim_testing"), untildify("~/.aws"));
assert_eq!(
(username, password),
(
"credentials_user".to_string(),
"credentials_pass".to_string()
)
);
}
#[test]
#[serial]
fn test_mixin_aws_credentials_from_env_works_when_typical() {
use std::env;
let old_access_key = env::var("AWS_ACCESS_KEY_ID").unwrap_or("".to_string());
let old_secret_key = env::var("AWS_SECRET_ACCESS_KEY").unwrap_or("".to_string());
env::set_var("AWS_ACCESS_KEY_ID", "myaccesskey");
env::set_var("AWS_SECRET_ACCESS_KEY", "mysecretkey");
let (username, password) =
S3::mixin_aws_credentials_from_env("".to_string(), "".to_string(), true);
env::set_var("AWS_ACCESS_KEY_ID", old_access_key);
env::set_var("AWS_SECRET_ACCESS_KEY", old_secret_key);
assert_eq!(
(username, password),
("myaccesskey".to_string(), "mysecretkey".to_string())
);
}
#[test]
#[serial]
fn test_mixin_aws_credentials_from_env_works_when_typical_and_not_silent() {
use std::env;
let old_access_key = env::var("AWS_ACCESS_KEY_ID").unwrap_or("".to_string());
let old_secret_key = env::var("AWS_SECRET_ACCESS_KEY").unwrap_or("".to_string());
env::set_var("AWS_ACCESS_KEY_ID", "myaccesskey");
env::set_var("AWS_SECRET_ACCESS_KEY", "mysecretkey");
let (username, password) =
S3::mixin_aws_credentials_from_env("".to_string(), "".to_string(), false);
env::set_var("AWS_ACCESS_KEY_ID", old_access_key);
env::set_var("AWS_SECRET_ACCESS_KEY", old_secret_key);
assert_eq!(
(username, password),
("myaccesskey".to_string(), "mysecretkey".to_string())
);
}
}
#[test]
fn test_get_credentials_works_when_typical() {
let parsed_address = ParsedAddress::parse_address(
"s3://user:pass@localhost:9000/test-bucket/subfolder/test.file",
true,
);
let (username, password) = S3::get_credentials(&parsed_address, true);
assert_eq!(
(username, password),
("user".to_string(), "pass".to_string())
)
}
#[test]
fn test_get_credentials_works_when_typical_and_not_silent() {
let parsed_address = ParsedAddress::parse_address(
"s3://user:pass@localhost:9000/test-bucket/subfolder/test.file",
true,
);
let (username, password) = S3::get_credentials(&parsed_address, false);
assert_eq!(
(username, password),
("user".to_string(), "pass".to_string())
)
}
| 32.077033 | 208 | 0.571467 |
ed12c1b3bc9c1de1461b3c657a97eef8eebfc24d | 2,868 | use crate::build::Builder;
use crate::build::matches::MatchPair;
use crate::hair::*;
use rustc::mir::*;
use std::u32;
use std::convert::TryInto;
impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
pub fn field_match_pairs<'pat>(&mut self,
place: Place<'tcx>,
subpatterns: &'pat [FieldPattern<'tcx>])
-> Vec<MatchPair<'pat, 'tcx>> {
subpatterns.iter()
.map(|fieldpat| {
let place = place.clone().field(fieldpat.field,
fieldpat.pattern.ty);
MatchPair::new(place, &fieldpat.pattern)
})
.collect()
}
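    /// Expands a slice pattern into match pairs: `ConstantIndex` projections
    /// counted from the front for the prefix, an optional `Subslice`
    /// projection for a `..` binding, and `ConstantIndex` projections counted
    /// from the back (`from_end: true`) for the suffix.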
pub fn prefix_slice_suffix<'pat>(&mut self,
match_pairs: &mut Vec<MatchPair<'pat, 'tcx>>,
place: &Place<'tcx>,
prefix: &'pat [Pattern<'tcx>],
opt_slice: Option<&'pat Pattern<'tcx>>,
suffix: &'pat [Pattern<'tcx>]) {
let min_length = prefix.len() + suffix.len();
let min_length = min_length.try_into().unwrap();
match_pairs.extend(
prefix.iter()
.enumerate()
.map(|(idx, subpattern)| {
let elem = ProjectionElem::ConstantIndex {
offset: idx as u32,
min_length,
from_end: false,
};
let place = place.clone().elem(elem);
MatchPair::new(place, subpattern)
})
);
if let Some(subslice_pat) = opt_slice {
let subslice = place.clone().elem(ProjectionElem::Subslice {
from: prefix.len() as u32,
to: suffix.len() as u32
});
match_pairs.push(MatchPair::new(subslice, subslice_pat));
}
match_pairs.extend(
suffix.iter()
.rev()
.enumerate()
.map(|(idx, subpattern)| {
let elem = ProjectionElem::ConstantIndex {
offset: (idx+1) as u32,
min_length,
from_end: true,
};
let place = place.clone().elem(elem);
MatchPair::new(place, subpattern)
})
);
}
}
impl<'pat, 'tcx> MatchPair<'pat, 'tcx> {
pub fn new(place: Place<'tcx>, pattern: &'pat Pattern<'tcx>) -> MatchPair<'pat, 'tcx> {
MatchPair {
place,
pattern,
slice_len_checked: false,
}
}
}
| 36.303797 | 91 | 0.413529 |
e8fb9b512350ad639da8f5feef7b8931b848c9ea | 3,199 | use futures::{future, Future};
pub use linkerd2_app_core::proxy::identity::{
certify, Crt, CrtKey, Csr, InvalidName, Key, Local, Name, TokenSource, TrustAnchors,
};
use linkerd2_app_core::{
classify,
config::{ControlAddr, ControlConfig},
control, dns, proxy, reconnect,
svc::{self, LayerExt},
transport::{connect, tls},
ControlHttpMetricsRegistry as Metrics, Error, Never,
};
use tracing::debug;
#[derive(Clone, Debug)]
pub enum Config {
Disabled,
Enabled {
control: ControlConfig,
certify: certify::Config,
},
}
pub enum Identity {
Disabled,
Enabled {
addr: ControlAddr,
local: Local,
task: Task,
},
}
pub type Task = Box<dyn Future<Item = (), Error = Never> + Send + 'static>;
pub type LocalIdentity = tls::Conditional<Local>;
impl Config {
pub fn build(self, dns: dns::Resolver, metrics: Metrics) -> Result<Identity, Error> {
match self {
Config::Disabled => Ok(Identity::Disabled),
Config::Enabled { control, certify } => {
let (local, crt_store) = Local::new(&certify);
let addr = control.addr;
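                // Build the client stack bottom-up: a TCP connector wrapped in
                // TLS, the control-plane HTTP client with DNS resolution and
                // reconnect-with-backoff, then HTTP metrics, gRPC body
                // handling, origin rewriting, and a bounded buffer in front.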
let svc = svc::stack(connect::svc(control.connect.keepalive))
.push(tls::client::layer(tls::Conditional::Some(
certify.trust_anchors.clone(),
)))
.push_timeout(control.connect.timeout)
.push(control::client::layer())
.push(control::resolve::layer(dns))
.push(reconnect::layer({
let backoff = control.connect.backoff;
move |_| Ok(backoff.stream())
}))
.push(proxy::http::metrics::layer::<_, classify::Response>(
metrics,
))
.push(proxy::grpc::req_body_as_payload::layer().per_make())
.push(control::add_origin::layer())
.push_buffer_pending(
control.buffer.max_in_flight,
control.buffer.dispatch_timeout,
)
.into_inner()
.make(addr.clone());
// Save to be spawned on an auxiliary runtime.
let task = {
let addr = addr.clone();
Box::new(future::lazy(move || {
debug!(peer.addr = ?addr, "running");
certify::Daemon::new(certify, crt_store, svc)
}))
};
Ok(Identity::Enabled { addr, local, task })
}
}
}
}
impl Identity {
pub fn local(&self) -> LocalIdentity {
match self {
Identity::Disabled => tls::Conditional::None(tls::ReasonForNoIdentity::Disabled),
Identity::Enabled { ref local, .. } => tls::Conditional::Some(local.clone()),
}
}
pub fn task(self) -> Task {
match self {
Identity::Disabled => Box::new(futures::future::ok(())),
Identity::Enabled { task, .. } => task,
}
}
}
| 32.642857 | 93 | 0.503907 |
214021af03f15a7f830facd98ba6c60f1c40d1e2 | 5,017 | use dominator::{events, html, Dom, DomBuilder};
use futures_signals::signal::Signal;
use web_sys::HtmlElement;
use crate::components::mixins::disabled_signal_mixin;
pub enum ButtonType {
Elevated,
Contained,
Outlined,
Text,
}
pub enum ButtonStyle {
Prominent,
Neutral,
Unimportant,
}
impl Default for ButtonType {
fn default() -> Self {
ButtonType::Contained
}
}
impl Default for ButtonStyle {
fn default() -> Self {
Self::Prominent
}
}
pub enum ButtonContent {
Label(String),
Dom(Dom),
}
#[derive(Default)]
pub struct ButtonProps<
FClickCallback: Fn(events::Click) -> (),
TDisabledSignal: Signal<Item = bool> + Unpin,
> {
pub content: Option<ButtonContent>,
pub click_handler: FClickCallback,
pub button_type: ButtonType,
pub style: ButtonStyle,
pub disabled_signal: TDisabledSignal,
}
impl<FClickCallback: Fn(events::Click) -> (), TDisabledSignal: Signal<Item = bool> + Unpin>
ButtonProps<FClickCallback, TDisabledSignal>
{
pub fn new(
click_handler: FClickCallback,
disabled_signal: TDisabledSignal,
) -> ButtonProps<FClickCallback, TDisabledSignal> {
Self {
content: None,
click_handler,
button_type: ButtonType::Contained,
style: ButtonStyle::Prominent,
disabled_signal,
}
}
#[inline]
#[must_use]
pub fn content<U>(mut self, content: U) -> Self
where
U: Into<Dom>,
{
self.content = Some(ButtonContent::Dom(content.into()));
self
}
#[inline]
#[must_use]
pub fn button_type(mut self, button_type: ButtonType) -> Self {
self.button_type = button_type;
self
}
#[inline]
#[must_use]
pub fn style(mut self, style: ButtonStyle) -> Self {
self.style = style;
self
}
}
#[macro_export]
macro_rules! button {
($props: expr) => {{
$crate::components::button::button($props, |d| d)
}};
($props: expr, $mixin: expr) => {{
$crate::components::button::button($props, $mixin)
}};
}
#[inline]
pub fn button<FClickCallback, TDisabledSignal, F>(
button_props: ButtonProps<FClickCallback, TDisabledSignal>,
mixin: F,
) -> Dom
where
FClickCallback: Fn(events::Click) -> () + 'static,
TDisabledSignal: Signal<Item = bool> + Unpin + 'static,
F: FnOnce(DomBuilder<HtmlElement>) -> DomBuilder<HtmlElement>,
{
let content = button_props.content;
let click_handler = button_props.click_handler;
let disabled_signal = button_props.disabled_signal;
html!("button", {
.class("dmat-button")
.apply(mixin)
.class( match button_props.button_type {
ButtonType::Contained => "-contained",
ButtonType::Outlined => "-outlined",
ButtonType::Text => "-text",
ButtonType::Elevated => "-elevated",
})
.class(match button_props.style {
ButtonStyle::Prominent => "-prominent",
ButtonStyle::Neutral => "-neutral",
ButtonStyle::Unimportant => "-unimportant",
})
.apply(move |bdom| {
match content {
Some(ButtonContent::Label(label)) => bdom.text(label.as_str()),
Some(ButtonContent::Dom(dom)) => bdom.child(dom),
_ => bdom
}
})
.apply(move |dom| {
dom.event(click_handler)
})
.apply(disabled_signal_mixin(disabled_signal))
})
}
#[cfg(test)]
mod test {
use dominator::events::Click;
use dominator::{clone, html};
use futures_signals::signal::{Mutable, SignalExt};
use wasm_bindgen_test::*;
use web_sys::{HtmlButtonElement, HtmlElement};
use dominator_testing::{async_yield, mount_test_dom, test_dyn_element_by_id};
use crate::components::ButtonProps;
#[wasm_bindgen_test]
async fn button_test() {
let counter = Mutable::new(0);
let btn = button!(
ButtonProps::new(
clone!(counter => move |_: Click| {
counter.set(counter.get() + 1)
}),
counter.signal_cloned().map(|v| v > 0)
)
.content(html!("span")),
|d| d.attribute("id", "test-button")
);
mount_test_dom(btn);
test_dyn_element_by_id("test-button", |ele: &HtmlElement| {
ele.click();
});
assert_eq!(counter.get(), 1);
// We need to yield to v8 so that the disabled property actually propagates here :/
async_yield().await;
// Verify the counter won't increment after disabling the button
test_dyn_element_by_id("test-button", |ele: &HtmlElement| {
ele.click();
});
assert_eq!(counter.get(), 1);
async_yield().await;
test_dyn_element_by_id("test-button", |ele: &HtmlButtonElement| {
assert!(ele.disabled());
});
}
}
| 25.728205 | 91 | 0.584812 |
5647a2a9cc759363d0ddd3c803517c52a00ed1cc | 820 | //! Read lines from STDIN and signal when the STDIN has been consumed
use crate::common::Result;
use crate::events::Event;
use async_std::io;
use async_std::prelude::*;
use async_std::stream;
use async_std::sync::Sender;
/// Run the data input task
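///
/// A minimal wiring sketch, assuming a bounded async-std channel whose
/// receiving side is drained elsewhere:
///
/// ```ignore
/// let (sender, receiver) = async_std::sync::channel(1024);
/// async_std::task::spawn(task(async_std::io::stdin(), sender));
/// ```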
pub async fn task<R>(stdin: R, sender: Sender<Event>) -> Result<()>
where
R: io::Read + Unpin + Send + 'static,
{
log::trace!("starting to read input data");
let reader = io::BufReader::new(stdin);
let mut stream = reader
.lines()
.map(|res| res.expect("Error reading from STDIN"))
.filter(|line| !line.is_empty())
.map(Event::NewLine)
.chain(stream::once(Event::EOF));
while let Some(event) = stream.next().await {
sender.send(event).await;
}
log::trace!("input data done");
Ok(())
}
| 24.848485 | 69 | 0.620732 |
ff4c10dec7dcab9823b4ababbb378ac135bee12b | 6,972 | extern crate argparse;
extern crate nix;
extern crate pcap;
extern crate time;
use std::fs::{File, OpenOptions};
use std::io::stdout;
use std::io::Error;
use std::io::{Read, Write};
use std::net::Ipv4Addr;
use std::process;
use std::str::FromStr;
use std::sync::{Arc, Mutex};
use std::thread;
use argparse::{ArgumentParser, Print, Store, StoreFalse, StoreTrue};
use nix::sys::signal;
use nix::sys::signal::SigHandler;
pub mod arp;
/// Struct which holds all possible arguments
struct ArgOptions {
interface: String,
own_ip: Ipv4Addr,
target_ip: Ipv4Addr,
gateway_ip: Ipv4Addr,
ip_forward: bool,
verbose: bool,
log_traffic: bool,
}
fn main() {
// Define SIGINT handler
let sig_action = signal::SigAction::new(
SigHandler::Handler(handle_sigint),
signal::SaFlags::empty(),
signal::SigSet::empty(),
);
unsafe {
match signal::sigaction(signal::SIGINT, &sig_action) {
Ok(_) => (),
Err(e) => panic!("Unable to register SIGINT handler: {}", e),
}
}
let arg_options = parse_args();
let own_mac_addr = get_interface_mac_addr(arg_options.interface.as_ref());
println!(
"Own mac address for {} is: {}",
arg_options.interface,
arp::mac_to_string(&own_mac_addr)
);
// Enable kernel ip forwarding
if arg_options.ip_forward {
match ip_forward(arg_options.ip_forward) {
Ok(_) => (),
Err(e) => panic!("ip_forward() failed! {}", e),
}
}
// Enable traffic logging
if arg_options.log_traffic {
let log_cap_filter = format!("host {}", arg_options.target_ip);
let mut log_cap = pcap_open(arg_options.interface.as_ref(), log_cap_filter.as_ref(), 0);
thread::spawn(move || {
log_traffic_pcap(&mut log_cap, "save.pcap");
});
}
// Start arp spoofing
let cap_ptr = Arc::new(Mutex::new(pcap_open(
arg_options.interface.as_ref(),
"arp",
5000,
)));
arp::arp_poisoning(
cap_ptr,
own_mac_addr,
arg_options.own_ip,
arg_options.target_ip,
arg_options.gateway_ip,
);
}
/// Opens a pcap capture device
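///
/// A minimal sketch, assuming an `eth0` interface exists and the process has
/// capture privileges:
///
/// ```ignore
/// let mut cap = pcap_open("eth0", "arp", 5000);
/// ```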
fn pcap_open(
interface_name: &str,
pcap_filter: &str,
pcap_timeout: i32,
) -> pcap::Capture<pcap::Active> {
let mut cap = pcap::Capture::from_device(interface_name)
.unwrap()
.timeout(pcap_timeout)
.open()
.unwrap();
cap.filter(pcap_filter).unwrap();
cap
}
/// extern "C" sigint handler
extern "C" fn handle_sigint(_: i32) {
println!("\nInterrupted!");
match ip_forward(false) {
Ok(_) => (),
Err(e) => println!("{}", e),
}
process::exit(1);
}
/// Parses args or panics if something is missing.
fn parse_args() -> ArgOptions {
let mut options = ArgOptions {
interface: String::from(""),
own_ip: Ipv4Addr::new(0, 0, 0, 0),
target_ip: Ipv4Addr::new(0, 0, 0, 0),
gateway_ip: Ipv4Addr::new(0, 0, 0, 0),
ip_forward: true,
verbose: false,
log_traffic: false,
};
{
// This block limits scope of borrows by ap.refer() method
let mut ap = ArgumentParser::new();
        ap.set_description("Minimal ARP spoofing tool written in Rust.");
ap.refer(&mut options.interface)
.add_option(&["-i", "--interface"], Store, "interface name")
.required();
ap.refer(&mut options.own_ip)
.add_option(
&["--own"],
Store,
"own ipv4 address (required until pcap allows ip enumeration)",
)
.required();
ap.refer(&mut options.target_ip)
.add_option(&["--target"], Store, "target ipv4 address")
.required();
ap.refer(&mut options.gateway_ip)
.add_option(&["--gateway"], Store, "gateway ipv4 address")
.required();
ap.refer(&mut options.log_traffic).add_option(
&["--log-traffic"],
StoreTrue,
"logs all target traffic to `save.pcap`",
);
ap.refer(&mut options.ip_forward).add_option(
&["-n", "--no-forward"],
StoreFalse,
"leave `/proc/sys/net/ipv4/ip_forward` untouched",
);
ap.refer(&mut options.verbose)
.add_option(&["-v", "--verbose"], StoreTrue, "be verbose");
ap.add_option(
&["-V", "--version"],
Print(env!("CARGO_PKG_VERSION").to_string()),
"show version",
);
ap.parse_args_or_exit();
}
// FIXME: use of unstable library feature 'ip': extra functionality has not been scrutinized to the level that it should be stable (see issue #27709)
//assert_eq!(true, options.target_ip.is_private());
//assert_eq!(true, options.gateway_ip.is_private());
options
}
/// Logs traffic to pcap file and prints network statistic
pub fn log_traffic_pcap(cap: &mut pcap::Capture<pcap::Active>, log_file: &str) {
let mut savefile = cap.savefile(log_file).unwrap();
let mut last_stats = time::precise_time_s();
let stats_threshold = 15.0;
loop {
{
let packet = cap.next().unwrap();
savefile.write(&packet);
}
if (time::precise_time_s() - last_stats) > stats_threshold {
let stats = cap.stats().unwrap();
print!(
"\r[*] Received: {}, dropped: {}, if_dropped: {}",
stats.received, stats.dropped, stats.if_dropped
);
match stdout().flush() {
Ok(_) => (),
Err(e) => println!("{}", e),
}
last_stats = time::precise_time_s();
}
}
}
/// Modifies `/proc/sys/net/ipv4/ip_forward` to enable/disable ip forwarding
fn ip_forward(enable: bool) -> Result<(), Error> {
let ipv4_fw_path = "/proc/sys/net/ipv4/ip_forward";
let ipv4_fw_value = match enable {
true => "1\n",
false => "0\n",
};
let result = match OpenOptions::new().write(true).open(ipv4_fw_path) {
Ok(mut f) => f.write_all(String::from(ipv4_fw_value).as_bytes()),
Err(e) => panic!("Unable to open {}: {}", ipv4_fw_path, e),
};
println!("[+] forwarding ipv4 traffic: {}", enable);
result
}
/// This function will be obsolete once device info enumeration is implemented.
/// See: https://github.com/ebfull/pcap/issues/13
fn get_interface_mac_addr(interface_name: &str) -> [u8; 6] {
let path = format!("/sys/class/net/{}/address", interface_name);
let mut mac_addr_buf = String::new();
match File::open(&path) {
Ok(mut f) => f.read_to_string(&mut mac_addr_buf).unwrap(),
Err(e) => panic!(
"Unable to read mac address from {} (Network interface down?): {}",
path, e
),
};
arp::string_to_mac(String::from_str(mac_addr_buf.trim()).unwrap())
}
| 30.986667 | 153 | 0.574871 |
c18dd1459a144644872e472d210295c493881c5f | 2,194 | use core::future::Future;
use embedded_storage::nor_flash::ErrorType;
/// Read only NOR flash trait.
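///
/// # Example
///
/// A minimal sketch, generic over any implementor; errors are bubbled up:
///
/// ```ignore
/// async fn read_magic<F: AsyncReadNorFlash>(flash: &mut F) -> Result<[u8; 4], F::Error> {
///     // Read the first four bytes of the device.
///     let mut buf = [0u8; 4];
///     flash.read(0, &mut buf).await?;
///     Ok(buf)
/// }
/// ```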
pub trait AsyncReadNorFlash: ErrorType {
    /// The minimum number of bytes the storage peripheral can read
const READ_SIZE: usize;
type ReadFuture<'a>: Future<Output = Result<(), Self::Error>> + 'a
where
Self: 'a;
/// Read a slice of data from the storage peripheral, starting the read
/// operation at the given address offset, and reading `bytes.len()` bytes.
///
/// # Errors
///
/// Returns an error if the arguments are not aligned or out of bounds. The implementation
/// can use the [`check_read`] helper function.
fn read<'a>(&'a mut self, offset: u32, bytes: &'a mut [u8]) -> Self::ReadFuture<'a>;
/// The capacity of the peripheral in bytes.
fn capacity(&self) -> usize;
}
/// NOR flash trait.
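///
/// # Example
///
/// A minimal erase-then-write sketch, assuming `offset` is `ERASE_SIZE`-aligned
/// and `bytes.len()` is a multiple of `WRITE_SIZE`:
///
/// ```ignore
/// async fn rewrite<F: AsyncNorFlash>(flash: &mut F, offset: u32, bytes: &[u8]) -> Result<(), F::Error> {
///     // Erase one page, then write the new contents into it.
///     flash.erase(offset, offset + F::ERASE_SIZE as u32).await?;
///     flash.write(offset, bytes).await?;
///     Ok(())
/// }
/// ```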
pub trait AsyncNorFlash: AsyncReadNorFlash {
    /// The minimum number of bytes the storage peripheral can write
const WRITE_SIZE: usize;
    /// The minimum number of bytes the storage peripheral can erase
const ERASE_SIZE: usize;
type EraseFuture<'a>: Future<Output = Result<(), Self::Error>> + 'a
where
Self: 'a;
/// Erase the given storage range, clearing all data within `[from..to]`.
/// The given range will contain all 1s afterwards.
///
/// If power is lost during erase, contents of the page are undefined.
///
/// # Errors
///
    /// Returns an error if the arguments are not aligned or out of bounds (the case where `from >
    /// to` is considered out of bounds). The implementation can use the [`check_erase`]
/// helper function.
fn erase<'a>(&'a mut self, from: u32, to: u32) -> Self::EraseFuture<'a>;
type WriteFuture<'a>: Future<Output = Result<(), Self::Error>> + 'a
where
Self: 'a;
/// If power is lost during write, the contents of the written words are undefined,
/// but the rest of the page is guaranteed to be unchanged.
/// It is not allowed to write to the same word twice.
///
/// # Errors
///
/// Returns an error if the arguments are not aligned or out of bounds. The implementation
/// can use the [`check_write`] helper function.
fn write<'a>(&'a mut self, offset: u32, bytes: &'a [u8]) -> Self::WriteFuture<'a>;
}
| 34.825397 | 93 | 0.68505 |
76aa30c10dba75d2d5cc0c0e9d5a27fcad5a4f1e | 29,635 | // Generated from definition io.k8s.api.core.v1.PersistentVolume
/// PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
#[derive(Clone, Debug, Default, PartialEq)]
pub struct PersistentVolume {
/// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
pub metadata: crate::apimachinery::pkg::apis::meta::v1::ObjectMeta,
/// Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
pub spec: Option<crate::api::core::v1::PersistentVolumeSpec>,
/// Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
pub status: Option<crate::api::core::v1::PersistentVolumeStatus>,
}
// Begin /v1/PersistentVolume
// Generated from operation createCoreV1PersistentVolume
impl PersistentVolume {
/// create a PersistentVolume
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::CreateResponse`]`<Self>>` constructor, or [`crate::CreateResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn create_persistent_volume(
body: &crate::api::core::v1::PersistentVolume,
optional: crate::CreateOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::CreateResponse<Self>>), crate::RequestError> {
let __url = "/api/v1/persistentvolumes?".to_owned();
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::post(__url);
let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation deleteCoreV1CollectionPersistentVolume
impl PersistentVolume {
/// delete collection of PersistentVolume
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::DeleteResponse`]`<`[`crate::List`]`<Self>>>` constructor, or [`crate::DeleteResponse`]`<`[`crate::List`]`<Self>>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `delete_optional`
///
/// Delete options. Use `Default::default()` to not pass any.
///
/// * `list_optional`
///
/// List options. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn delete_collection_persistent_volume(
delete_optional: crate::DeleteOptional<'_>,
list_optional: crate::ListOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::DeleteResponse<crate::List<Self>>>), crate::RequestError> {
let __url = "/api/v1/persistentvolumes?".to_owned();
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
list_optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::delete(__url);
let __body = crate::serde_json::to_vec(&delete_optional).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation deleteCoreV1PersistentVolume
impl PersistentVolume {
/// delete a PersistentVolume
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::DeleteResponse`]`<Self>>` constructor, or [`crate::DeleteResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the PersistentVolume
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn delete_persistent_volume(
name: &str,
optional: crate::DeleteOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::DeleteResponse<Self>>), crate::RequestError> {
let __url = format!("/api/v1/persistentvolumes/{name}",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let __request = crate::http::Request::delete(__url);
let __body = crate::serde_json::to_vec(&optional).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation listCoreV1PersistentVolume
impl PersistentVolume {
/// list or watch objects of kind PersistentVolume
///
/// This operation only supports listing all items of this type.
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::ListResponse`]`<Self>>` constructor, or [`crate::ListResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
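    ///
    /// # Example
    ///
    /// A minimal request-construction sketch; executing the returned
    /// `http::Request` with an HTTP client and feeding the response bytes into
    /// the `ResponseBody` is assumed to happen elsewhere:
    ///
    /// ```ignore
    /// let (request, response_body) =
    ///     PersistentVolume::list_persistent_volume(Default::default())?;
    /// ```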
#[cfg(feature = "api")]
pub fn list_persistent_volume(
optional: crate::ListOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::ListResponse<Self>>), crate::RequestError> {
let __url = "/api/v1/persistentvolumes?".to_owned();
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation patchCoreV1PersistentVolume
impl PersistentVolume {
/// partially update the specified PersistentVolume
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::PatchResponse`]`<Self>>` constructor, or [`crate::PatchResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the PersistentVolume
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn patch_persistent_volume(
name: &str,
body: &crate::apimachinery::pkg::apis::meta::v1::Patch,
optional: crate::PatchOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::PatchResponse<Self>>), crate::RequestError> {
let __url = format!("/api/v1/persistentvolumes/{name}?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::patch(__url);
let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static(match body {
crate::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json",
crate::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json",
crate::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json",
}));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation patchCoreV1PersistentVolumeStatus
impl PersistentVolume {
/// partially update status of the specified PersistentVolume
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::PatchResponse`]`<Self>>` constructor, or [`crate::PatchResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the PersistentVolume
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn patch_persistent_volume_status(
name: &str,
body: &crate::apimachinery::pkg::apis::meta::v1::Patch,
optional: crate::PatchOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::PatchResponse<Self>>), crate::RequestError> {
let __url = format!("/api/v1/persistentvolumes/{name}/status?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::patch(__url);
let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static(match body {
crate::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json",
crate::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json",
crate::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json",
}));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation readCoreV1PersistentVolume
impl PersistentVolume {
/// read the specified PersistentVolume
///
/// Use the returned [`crate::ResponseBody`]`<`[`ReadPersistentVolumeResponse`]`>` constructor, or [`ReadPersistentVolumeResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the PersistentVolume
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn read_persistent_volume(
name: &str,
optional: ReadPersistentVolumeOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<ReadPersistentVolumeResponse>), crate::RequestError> {
let ReadPersistentVolumeOptional {
pretty,
} = optional;
let __url = format!("/api/v1/persistentvolumes/{name}?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
if let Some(pretty) = pretty {
__query_pairs.append_pair("pretty", pretty);
}
let __url = __query_pairs.finish();
let __request = crate::http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Optional parameters of [`PersistentVolume::read_persistent_volume`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReadPersistentVolumeOptional<'a> {
/// If 'true', then the output is pretty printed.
pub pretty: Option<&'a str>,
}
/// Use `<ReadPersistentVolumeResponse as Response>::try_from_parts` to parse the HTTP response body of [`PersistentVolume::read_persistent_volume`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReadPersistentVolumeResponse {
Ok(crate::api::core::v1::PersistentVolume),
Other(Result<Option<crate::serde_json::Value>, crate::serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReadPersistentVolumeResponse {
fn try_from_parts(status_code: crate::http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
crate::http::StatusCode::OK => {
let result = match crate::serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ReadPersistentVolumeResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match crate::serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ReadPersistentVolumeResponse::Other(result), read))
},
}
}
}
// Generated from operation readCoreV1PersistentVolumeStatus
impl PersistentVolume {
/// read status of the specified PersistentVolume
///
/// Use the returned [`crate::ResponseBody`]`<`[`ReadPersistentVolumeStatusResponse`]`>` constructor, or [`ReadPersistentVolumeStatusResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the PersistentVolume
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn read_persistent_volume_status(
name: &str,
optional: ReadPersistentVolumeStatusOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<ReadPersistentVolumeStatusResponse>), crate::RequestError> {
let ReadPersistentVolumeStatusOptional {
pretty,
} = optional;
let __url = format!("/api/v1/persistentvolumes/{name}/status?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
if let Some(pretty) = pretty {
__query_pairs.append_pair("pretty", pretty);
}
let __url = __query_pairs.finish();
let __request = crate::http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Optional parameters of [`PersistentVolume::read_persistent_volume_status`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReadPersistentVolumeStatusOptional<'a> {
/// If 'true', then the output is pretty printed.
pub pretty: Option<&'a str>,
}
/// Use `<ReadPersistentVolumeStatusResponse as Response>::try_from_parts` to parse the HTTP response body of [`PersistentVolume::read_persistent_volume_status`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReadPersistentVolumeStatusResponse {
Ok(crate::api::core::v1::PersistentVolume),
Other(Result<Option<crate::serde_json::Value>, crate::serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReadPersistentVolumeStatusResponse {
fn try_from_parts(status_code: crate::http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
crate::http::StatusCode::OK => {
let result = match crate::serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ReadPersistentVolumeStatusResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match crate::serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ReadPersistentVolumeStatusResponse::Other(result), read))
},
}
}
}
// Generated from operation replaceCoreV1PersistentVolume
impl PersistentVolume {
/// replace the specified PersistentVolume
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::ReplaceResponse`]`<Self>>` constructor, or [`crate::ReplaceResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the PersistentVolume
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn replace_persistent_volume(
name: &str,
body: &crate::api::core::v1::PersistentVolume,
optional: crate::ReplaceOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::ReplaceResponse<Self>>), crate::RequestError> {
let __url = format!("/api/v1/persistentvolumes/{name}?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::put(__url);
let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation replaceCoreV1PersistentVolumeStatus
impl PersistentVolume {
/// replace status of the specified PersistentVolume
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::ReplaceResponse`]`<Self>>` constructor, or [`crate::ReplaceResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the PersistentVolume
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn replace_persistent_volume_status(
name: &str,
body: &crate::api::core::v1::PersistentVolume,
optional: crate::ReplaceOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::ReplaceResponse<Self>>), crate::RequestError> {
let __url = format!("/api/v1/persistentvolumes/{name}/status?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::put(__url);
let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation watchCoreV1PersistentVolume
impl PersistentVolume {
/// list or watch objects of kind PersistentVolume
///
/// This operation only supports watching one item, or a list of items, of this type for changes.
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::WatchResponse`]`<Self>>` constructor, or [`crate::WatchResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn watch_persistent_volume(
optional: crate::WatchOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::WatchResponse<Self>>), crate::RequestError> {
let __url = "/api/v1/persistentvolumes?".to_owned();
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// End /v1/PersistentVolume
impl crate::Resource for PersistentVolume {
const API_VERSION: &'static str = "v1";
const GROUP: &'static str = "";
const KIND: &'static str = "PersistentVolume";
const VERSION: &'static str = "v1";
}
impl crate::ListableResource for PersistentVolume {
const LIST_KIND: &'static str = "PersistentVolumeList";
}
impl crate::Metadata for PersistentVolume {
type Ty = crate::apimachinery::pkg::apis::meta::v1::ObjectMeta;
fn metadata(&self) -> &<Self as crate::Metadata>::Ty {
&self.metadata
}
fn metadata_mut(&mut self) -> &mut<Self as crate::Metadata>::Ty {
&mut self.metadata
}
}
impl<'de> crate::serde::Deserialize<'de> for PersistentVolume {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_api_version,
Key_kind,
Key_metadata,
Key_spec,
Key_status,
Other,
}
impl<'de> crate::serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
struct Visitor;
impl<'de> crate::serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error {
Ok(match v {
"apiVersion" => Field::Key_api_version,
"kind" => Field::Key_kind,
"metadata" => Field::Key_metadata,
"spec" => Field::Key_spec,
"status" => Field::Key_status,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> crate::serde::de::Visitor<'de> for Visitor {
type Value = PersistentVolume;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(<Self::Value as crate::Resource>::KIND)
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> {
let mut value_metadata: Option<crate::apimachinery::pkg::apis::meta::v1::ObjectMeta> = None;
let mut value_spec: Option<crate::api::core::v1::PersistentVolumeSpec> = None;
let mut value_status: Option<crate::api::core::v1::PersistentVolumeStatus> = None;
while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_api_version => {
let value_api_version: String = crate::serde::de::MapAccess::next_value(&mut map)?;
if value_api_version != <Self::Value as crate::Resource>::API_VERSION {
return Err(crate::serde::de::Error::invalid_value(crate::serde::de::Unexpected::Str(&value_api_version), &<Self::Value as crate::Resource>::API_VERSION));
}
},
Field::Key_kind => {
let value_kind: String = crate::serde::de::MapAccess::next_value(&mut map)?;
if value_kind != <Self::Value as crate::Resource>::KIND {
return Err(crate::serde::de::Error::invalid_value(crate::serde::de::Unexpected::Str(&value_kind), &<Self::Value as crate::Resource>::KIND));
}
},
Field::Key_metadata => value_metadata = Some(crate::serde::de::MapAccess::next_value(&mut map)?),
Field::Key_spec => value_spec = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_status => value_status = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(PersistentVolume {
metadata: value_metadata.ok_or_else(|| crate::serde::de::Error::missing_field("metadata"))?,
spec: value_spec,
status: value_status,
})
}
}
deserializer.deserialize_struct(
<Self as crate::Resource>::KIND,
&[
"apiVersion",
"kind",
"metadata",
"spec",
"status",
],
Visitor,
)
}
}
impl crate::serde::Serialize for PersistentVolume {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer {
let mut state = serializer.serialize_struct(
<Self as crate::Resource>::KIND,
3 +
self.spec.as_ref().map_or(0, |_| 1) +
self.status.as_ref().map_or(0, |_| 1),
)?;
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "apiVersion", <Self as crate::Resource>::API_VERSION)?;
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "kind", <Self as crate::Resource>::KIND)?;
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "metadata", &self.metadata)?;
if let Some(value) = &self.spec {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "spec", value)?;
}
if let Some(value) = &self.status {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "status", value)?;
}
crate::serde::ser::SerializeStruct::end(state)
}
}
| 43.838757 | 212 | 0.599393 |
16dfafd0b4ff2ac7be1737832cde568f5b87dcb3 | 28,352 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::cmp;
use core::iter::Filter;
use crate::tables::word::WordCat;
/// An iterator over the substrings of a string which, after splitting the string on
/// [word boundaries](http://www.unicode.org/reports/tr29/#Word_Boundaries),
/// contain any characters with the
/// [Alphabetic](http://unicode.org/reports/tr44/#Alphabetic)
/// property, or with
/// [General_Category=Number](http://unicode.org/reports/tr44/#General_Category_Values).
///
/// This struct is created by the [`unicode_words`] method on the [`UnicodeSegmentation`] trait. See
/// its documentation for more.
///
/// [`unicode_words`]: trait.UnicodeSegmentation.html#tymethod.unicode_words
/// [`UnicodeSegmentation`]: trait.UnicodeSegmentation.html
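///
/// # Example
///
/// A minimal sketch, with the [`UnicodeSegmentation`] trait in scope:
///
/// ```
/// # use unicode_segmentation::UnicodeSegmentation;
/// let words: Vec<&str> = "The quick brown fox can't jump 32.3 feet".unicode_words().collect();
/// assert_eq!(words, &["The", "quick", "brown", "fox", "can't", "jump", "32.3", "feet"]);
/// ```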
pub struct UnicodeWords<'a> {
inner: Filter<UWordBounds<'a>, fn(&&str) -> bool>,
}
impl<'a> Iterator for UnicodeWords<'a> {
type Item = &'a str;
#[inline]
fn next(&mut self) -> Option<&'a str> {
self.inner.next()
}
}
impl<'a> DoubleEndedIterator for UnicodeWords<'a> {
#[inline]
fn next_back(&mut self) -> Option<&'a str> {
self.inner.next_back()
}
}
/// An iterator over the substrings of a string which, after splitting the string on
/// [word boundaries](http://www.unicode.org/reports/tr29/#Word_Boundaries),
/// contain any characters with the
/// [Alphabetic](http://unicode.org/reports/tr44/#Alphabetic)
/// property, or with
/// [General_Category=Number](http://unicode.org/reports/tr44/#General_Category_Values).
/// This iterator also provides the byte offsets for each substring.
///
/// This struct is created by the [`unicode_word_indices`] method on the [`UnicodeSegmentation`] trait. See
/// its documentation for more.
///
/// [`unicode_word_indices`]: trait.UnicodeSegmentation.html#tymethod.unicode_word_indices
/// [`UnicodeSegmentation`]: trait.UnicodeSegmentation.html
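///
/// # Example
///
/// A minimal sketch, with the [`UnicodeSegmentation`] trait in scope:
///
/// ```
/// # use unicode_segmentation::UnicodeSegmentation;
/// let pairs: Vec<(usize, &str)> = "brown fox".unicode_word_indices().collect();
/// assert_eq!(pairs, &[(0, "brown"), (6, "fox")]);
/// ```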
pub struct UnicodeWordIndices<'a> {
inner: Filter<UWordBoundIndices<'a>, fn(&(usize, &str)) -> bool>,
}
impl<'a> Iterator for UnicodeWordIndices<'a> {
type Item = (usize, &'a str);
#[inline]
fn next(&mut self) -> Option<(usize, &'a str)> {
self.inner.next()
}
}
impl<'a> DoubleEndedIterator for UnicodeWordIndices<'a> {
#[inline]
fn next_back(&mut self) -> Option<(usize, &'a str)> {
self.inner.next_back()
}
}
/// External iterator for a string's
/// [word boundaries](http://www.unicode.org/reports/tr29/#Word_Boundaries).
///
/// This struct is created by the [`split_word_bounds`] method on the [`UnicodeSegmentation`]
/// trait. See its documentation for more.
///
/// [`split_word_bounds`]: trait.UnicodeSegmentation.html#tymethod.split_word_bounds
/// [`UnicodeSegmentation`]: trait.UnicodeSegmentation.html
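///
/// # Example
///
/// A minimal sketch, with the [`UnicodeSegmentation`] trait in scope; note
/// that, unlike `unicode_words`, every substring (including punctuation and
/// whitespace) is yielded:
///
/// ```
/// # use unicode_segmentation::UnicodeSegmentation;
/// let bounds: Vec<&str> = "Hello, world".split_word_bounds().collect();
/// assert_eq!(bounds, &["Hello", ",", " ", "world"]);
/// ```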
#[derive(Clone)]
pub struct UWordBounds<'a> {
string: &'a str,
cat: Option<WordCat>,
catb: Option<WordCat>,
}
/// External iterator for word boundaries and byte offsets.
///
/// This struct is created by the [`split_word_bound_indices`] method on the
/// [`UnicodeSegmentation`] trait. See its documentation for more.
///
/// [`split_word_bound_indices`]: trait.UnicodeSegmentation.html#tymethod.split_word_bound_indices
/// [`UnicodeSegmentation`]: trait.UnicodeSegmentation.html
#[derive(Clone)]
pub struct UWordBoundIndices<'a> {
start_offset: usize,
iter: UWordBounds<'a>,
}
impl<'a> UWordBoundIndices<'a> {
#[inline]
/// View the underlying data (the part yet to be iterated) as a slice of the original string.
///
/// ```rust
/// # use unicode_segmentation::UnicodeSegmentation;
/// let mut iter = "Hello world".split_word_bound_indices();
/// assert_eq!(iter.as_str(), "Hello world");
/// iter.next();
/// assert_eq!(iter.as_str(), " world");
/// iter.next();
/// assert_eq!(iter.as_str(), "world");
/// ```
pub fn as_str(&self) -> &'a str {
self.iter.as_str()
}
}
impl<'a> Iterator for UWordBoundIndices<'a> {
type Item = (usize, &'a str);
#[inline]
fn next(&mut self) -> Option<(usize, &'a str)> {
self.iter
.next()
.map(|s| (s.as_ptr() as usize - self.start_offset, s))
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
impl<'a> DoubleEndedIterator for UWordBoundIndices<'a> {
#[inline]
fn next_back(&mut self) -> Option<(usize, &'a str)> {
self.iter
.next_back()
.map(|s| (s.as_ptr() as usize - self.start_offset, s))
}
}
// state machine for word boundary rules
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum UWordBoundsState {
Start,
Letter,
HLetter,
Numeric,
Katakana,
ExtendNumLet,
Regional(RegionalState),
FormatExtend(FormatExtendType),
Zwj,
Emoji,
WSegSpace,
}
// subtypes for FormatExtend state in UWordBoundsState
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum FormatExtendType {
AcceptAny,
AcceptNone,
RequireLetter,
RequireHLetter,
AcceptQLetter,
RequireNumeric,
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum RegionalState {
Half,
Full,
Unknown,
}
fn is_emoji(ch: char) -> bool {
use crate::tables::emoji;
emoji::emoji_category(ch).2 == emoji::EmojiCat::EC_Extended_Pictographic
}
impl<'a> Iterator for UWordBounds<'a> {
type Item = &'a str;
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let slen = self.string.len();
(cmp::min(slen, 1), Some(slen))
}
#[inline]
fn next(&mut self) -> Option<&'a str> {
use self::FormatExtendType::*;
use self::UWordBoundsState::*;
use crate::tables::word as wd;
if self.string.len() == 0 {
return None;
}
let mut take_curr = true;
let mut take_cat = true;
let mut idx = 0;
let mut saveidx = 0;
let mut state = Start;
let mut cat = wd::WC_Any;
let mut savecat = wd::WC_Any;
// If extend/format/zwj were skipped. Handles precedence of WB3d over WB4
let mut skipped_format_extend = false;
for (curr, ch) in self.string.char_indices() {
idx = curr;
// Whether or not the previous category was ZWJ
// ZWJs get collapsed, so this handles precedence of WB3c over WB4
let prev_zwj = cat == wd::WC_ZWJ;
// if there's a category cached, grab it
cat = match self.cat {
None => wd::word_category(ch).2,
_ => self.cat.take().unwrap(),
};
take_cat = true;
// handle rule WB4
// just skip all format, extend, and zwj chars
// note that Start is a special case: if there's a bunch of Format | Extend
// characters at the beginning of a block of text, dump them out as one unit.
//
// (This is not obvious from the wording of UAX#29, but if you look at the
// test cases http://www.unicode.org/Public/UNIDATA/auxiliary/WordBreakTest.txt
// then the "correct" interpretation of WB4 becomes apparent.)
if state != Start {
match cat {
wd::WC_Extend | wd::WC_Format | wd::WC_ZWJ => {
skipped_format_extend = true;
continue;
}
_ => {}
}
}
// rule WB3c
// WB4 makes all ZWJs collapse into the previous state
// but you can still be in a Zwj state if you started with Zwj
//
            // This means that an EP + Zwj will collapse into EP, which is wrong,
            // since EP+EP is a boundary but EP+ZWJ+EP is not
//
// Thus, we separately keep track of whether or not the last character
// was a ZWJ. This is an additional bit of state tracked outside of the
// state enum; the state enum represents the last non-zwj state encountered.
// When prev_zwj is true, for the purposes of WB3c, we are in the Zwj state,
// however we are in the previous state for the purposes of all other rules.
if prev_zwj {
if is_emoji(ch) {
state = Emoji;
continue;
}
}
// Don't use `continue` in this match without updating `cat`
state = match state {
Start if cat == wd::WC_CR => {
idx += match self.get_next_cat(idx) {
Some(ncat) if ncat == wd::WC_LF => 1, // rule WB3
_ => 0,
};
break; // rule WB3a
}
Start => match cat {
wd::WC_ALetter => Letter, // rule WB5, WB6, WB9, WB13a
wd::WC_Hebrew_Letter => HLetter, // rule WB5, WB6, WB7a, WB7b, WB9, WB13a
wd::WC_Numeric => Numeric, // rule WB8, WB10, WB12, WB13a
wd::WC_Katakana => Katakana, // rule WB13, WB13a
wd::WC_ExtendNumLet => ExtendNumLet, // rule WB13a, WB13b
wd::WC_Regional_Indicator => Regional(RegionalState::Half), // rule WB13c
wd::WC_LF | wd::WC_Newline => break, // rule WB3a
wd::WC_ZWJ => Zwj, // rule WB3c
wd::WC_WSegSpace => WSegSpace, // rule WB3d
_ => {
if let Some(ncat) = self.get_next_cat(idx) {
// rule WB4
if ncat == wd::WC_Format || ncat == wd::WC_Extend || ncat == wd::WC_ZWJ
{
state = FormatExtend(AcceptNone);
self.cat = Some(ncat);
continue;
}
}
break; // rule WB999
}
},
WSegSpace => match cat {
wd::WC_WSegSpace if !skipped_format_extend => WSegSpace,
_ => {
take_curr = false;
break;
}
},
Zwj => {
// We already handle WB3c above.
take_curr = false;
break;
}
Letter | HLetter => match cat {
wd::WC_ALetter => Letter, // rule WB5
wd::WC_Hebrew_Letter => HLetter, // rule WB5
wd::WC_Numeric => Numeric, // rule WB9
wd::WC_ExtendNumLet => ExtendNumLet, // rule WB13a
wd::WC_Double_Quote if state == HLetter => {
savecat = cat;
saveidx = idx;
FormatExtend(RequireHLetter) // rule WB7b
}
wd::WC_Single_Quote if state == HLetter => {
FormatExtend(AcceptQLetter) // rule WB7a
}
wd::WC_MidLetter | wd::WC_MidNumLet | wd::WC_Single_Quote => {
savecat = cat;
saveidx = idx;
FormatExtend(RequireLetter) // rule WB6
}
_ => {
take_curr = false;
break;
}
},
Numeric => match cat {
wd::WC_Numeric => Numeric, // rule WB8
wd::WC_ALetter => Letter, // rule WB10
wd::WC_Hebrew_Letter => HLetter, // rule WB10
wd::WC_ExtendNumLet => ExtendNumLet, // rule WB13a
wd::WC_MidNum | wd::WC_MidNumLet | wd::WC_Single_Quote => {
savecat = cat;
saveidx = idx;
FormatExtend(RequireNumeric) // rule WB12
}
_ => {
take_curr = false;
break;
}
},
Katakana => match cat {
wd::WC_Katakana => Katakana, // rule WB13
wd::WC_ExtendNumLet => ExtendNumLet, // rule WB13a
_ => {
take_curr = false;
break;
}
},
ExtendNumLet => match cat {
wd::WC_ExtendNumLet => ExtendNumLet, // rule WB13a
wd::WC_ALetter => Letter, // rule WB13b
wd::WC_Hebrew_Letter => HLetter, // rule WB13b
wd::WC_Numeric => Numeric, // rule WB13b
wd::WC_Katakana => Katakana, // rule WB13b
_ => {
take_curr = false;
break;
}
},
Regional(RegionalState::Full) => {
                    // if we reach here we've gone too far:
                    // a full flag can only compose with ZWJ/Extend/Format
                    // characters following it.
take_curr = false;
break;
}
Regional(RegionalState::Half) => match cat {
wd::WC_Regional_Indicator => Regional(RegionalState::Full), // rule WB13c
_ => {
take_curr = false;
break;
}
},
Regional(_) => {
unreachable!("RegionalState::Unknown should not occur on forward iteration")
}
Emoji => {
// We already handle WB3c above. If you've reached this point, the emoji sequence is over.
take_curr = false;
break;
}
FormatExtend(t) => match t {
// handle FormatExtends depending on what type
RequireNumeric if cat == wd::WC_Numeric => Numeric, // rule WB11
RequireLetter | AcceptQLetter if cat == wd::WC_ALetter => Letter, // rule WB7
RequireLetter | AcceptQLetter if cat == wd::WC_Hebrew_Letter => HLetter, // WB7a
RequireHLetter if cat == wd::WC_Hebrew_Letter => HLetter, // rule WB7b
AcceptNone | AcceptQLetter => {
take_curr = false; // emit all the Format|Extend characters
take_cat = false;
break;
}
_ => break, // rewind (in if statement below)
},
}
}
if let FormatExtend(t) = state {
// we were looking for something and didn't find it; we have to back up
if t == RequireLetter || t == RequireHLetter || t == RequireNumeric {
idx = saveidx;
cat = savecat;
take_curr = false;
}
}
self.cat = if take_curr {
idx = idx + self.string[idx..].chars().next().unwrap().len_utf8();
None
} else if take_cat {
Some(cat)
} else {
None
};
let retstr = &self.string[..idx];
self.string = &self.string[idx..];
Some(retstr)
}
}
impl<'a> DoubleEndedIterator for UWordBounds<'a> {
#[inline]
fn next_back(&mut self) -> Option<&'a str> {
use self::FormatExtendType::*;
use self::UWordBoundsState::*;
use crate::tables::word as wd;
if self.string.len() == 0 {
return None;
}
let mut take_curr = true;
let mut take_cat = true;
let mut idx = self.string.len();
idx -= self.string.chars().next_back().unwrap().len_utf8();
let mut previdx = idx;
let mut saveidx = idx;
let mut state = Start;
let mut savestate = Start;
let mut cat = wd::WC_Any;
let mut skipped_format_extend = false;
for (curr, ch) in self.string.char_indices().rev() {
previdx = idx;
idx = curr;
// if there's a category cached, grab it
cat = match self.catb {
None => wd::word_category(ch).2,
_ => self.catb.take().unwrap(),
};
take_cat = true;
// backward iterator over word boundaries. Mostly the same as the forward
// iterator, with two weirdnesses:
// (1) If we encounter a single quote in the Start state, we have to check for a
// Hebrew Letter immediately before it.
// (2) Format and Extend char handling takes some gymnastics.
if cat == wd::WC_Extend || cat == wd::WC_Format || (cat == wd::WC_ZWJ && state != Zwj) {
// WB3c has more priority so we should not
// fold in that case
if match state {
FormatExtend(_) | Start => false,
_ => true,
} {
saveidx = previdx;
savestate = state;
state = FormatExtend(AcceptNone);
}
if state != Start {
continue;
}
} else if state == FormatExtend(AcceptNone) {
// finished a scan of some Format|Extend chars, restore previous state
state = savestate;
previdx = saveidx;
take_cat = false;
skipped_format_extend = true;
}
// Don't use `continue` in this match without updating `catb`
state = match state {
Start | FormatExtend(AcceptAny) => match cat {
_ if is_emoji(ch) => Zwj,
wd::WC_ALetter => Letter, // rule WB5, WB7, WB10, WB13b
wd::WC_Hebrew_Letter => HLetter, // rule WB5, WB7, WB7c, WB10, WB13b
wd::WC_Numeric => Numeric, // rule WB8, WB9, WB11, WB13b
wd::WC_Katakana => Katakana, // rule WB13, WB13b
wd::WC_ExtendNumLet => ExtendNumLet, // rule WB13a
wd::WC_Regional_Indicator => Regional(RegionalState::Unknown), // rule WB13c
// rule WB4:
wd::WC_Extend | wd::WC_Format | wd::WC_ZWJ => FormatExtend(AcceptAny),
wd::WC_Single_Quote => {
saveidx = idx;
FormatExtend(AcceptQLetter) // rule WB7a
}
wd::WC_WSegSpace => WSegSpace,
wd::WC_CR | wd::WC_LF | wd::WC_Newline => {
if state == Start {
if cat == wd::WC_LF {
idx -= match self.get_prev_cat(idx) {
Some(pcat) if pcat == wd::WC_CR => 1, // rule WB3
_ => 0,
};
}
} else {
take_curr = false;
}
break; // rule WB3a
}
_ => break, // rule WB999
},
Zwj => match cat {
// rule WB3c
wd::WC_ZWJ => FormatExtend(AcceptAny),
_ => {
take_curr = false;
break;
}
},
WSegSpace => match cat {
// rule WB3d
wd::WC_WSegSpace if !skipped_format_extend => WSegSpace,
_ => {
take_curr = false;
break;
}
},
Letter | HLetter => match cat {
wd::WC_ALetter => Letter, // rule WB5
wd::WC_Hebrew_Letter => HLetter, // rule WB5
wd::WC_Numeric => Numeric, // rule WB10
wd::WC_ExtendNumLet => ExtendNumLet, // rule WB13b
wd::WC_Double_Quote if state == HLetter => {
saveidx = previdx;
FormatExtend(RequireHLetter) // rule WB7c
}
wd::WC_MidLetter | wd::WC_MidNumLet | wd::WC_Single_Quote => {
saveidx = previdx;
FormatExtend(RequireLetter) // rule WB7
}
_ => {
take_curr = false;
break;
}
},
Numeric => match cat {
wd::WC_Numeric => Numeric, // rule WB8
wd::WC_ALetter => Letter, // rule WB9
wd::WC_Hebrew_Letter => HLetter, // rule WB9
wd::WC_ExtendNumLet => ExtendNumLet, // rule WB13b
wd::WC_MidNum | wd::WC_MidNumLet | wd::WC_Single_Quote => {
saveidx = previdx;
FormatExtend(RequireNumeric) // rule WB11
}
_ => {
take_curr = false;
break;
}
},
Katakana => match cat {
wd::WC_Katakana => Katakana, // rule WB13
wd::WC_ExtendNumLet => ExtendNumLet, // rule WB13b
_ => {
take_curr = false;
break;
}
},
ExtendNumLet => match cat {
wd::WC_ExtendNumLet => ExtendNumLet, // rule WB13a
wd::WC_ALetter => Letter, // rule WB13a
wd::WC_Hebrew_Letter => HLetter, // rule WB13a
wd::WC_Numeric => Numeric, // rule WB13a
wd::WC_Katakana => Katakana, // rule WB13a
_ => {
take_curr = false;
break;
}
},
Regional(mut regional_state) => match cat {
// rule WB13c
wd::WC_Regional_Indicator => {
if regional_state == RegionalState::Unknown {
let count = self.string[..previdx]
.chars()
.rev()
.map(|c| wd::word_category(c).2)
.filter(|&c| {
!(c == wd::WC_ZWJ || c == wd::WC_Extend || c == wd::WC_Format)
})
.take_while(|&c| c == wd::WC_Regional_Indicator)
.count();
regional_state = if count % 2 == 0 {
RegionalState::Full
} else {
RegionalState::Half
};
}
if regional_state == RegionalState::Full {
take_curr = false;
break;
} else {
Regional(RegionalState::Full)
}
}
_ => {
take_curr = false;
break;
}
},
Emoji => {
if is_emoji(ch) {
// rule WB3c
Zwj
} else {
take_curr = false;
break;
}
}
FormatExtend(t) => match t {
RequireNumeric if cat == wd::WC_Numeric => Numeric, // rule WB12
RequireLetter if cat == wd::WC_ALetter => Letter, // rule WB6
RequireLetter if cat == wd::WC_Hebrew_Letter => HLetter, // rule WB6
AcceptQLetter if cat == wd::WC_Hebrew_Letter => HLetter, // rule WB7a
RequireHLetter if cat == wd::WC_Hebrew_Letter => HLetter, // rule WB7b
                    _ => break, // backtrack (in if statement below)
},
}
}
if let FormatExtend(t) = state {
// if we required something but didn't find it, backtrack
if t == RequireLetter
|| t == RequireHLetter
|| t == RequireNumeric
|| t == AcceptNone
|| t == AcceptQLetter
{
previdx = saveidx;
take_cat = false;
take_curr = false;
}
}
self.catb = if take_curr {
None
} else {
idx = previdx;
if take_cat {
Some(cat)
} else {
None
}
};
let retstr = &self.string[idx..];
self.string = &self.string[..idx];
Some(retstr)
}
}
impl<'a> UWordBounds<'a> {
#[inline]
/// View the underlying data (the part yet to be iterated) as a slice of the original string.
///
/// ```rust
/// # use unicode_segmentation::UnicodeSegmentation;
/// let mut iter = "Hello world".split_word_bounds();
/// assert_eq!(iter.as_str(), "Hello world");
/// iter.next();
/// assert_eq!(iter.as_str(), " world");
/// iter.next();
/// assert_eq!(iter.as_str(), "world");
/// ```
pub fn as_str(&self) -> &'a str {
self.string
}
#[inline]
fn get_next_cat(&self, idx: usize) -> Option<WordCat> {
use crate::tables::word as wd;
let nidx = idx + self.string[idx..].chars().next().unwrap().len_utf8();
if nidx < self.string.len() {
let nch = self.string[nidx..].chars().next().unwrap();
Some(wd::word_category(nch).2)
} else {
None
}
}
#[inline]
fn get_prev_cat(&self, idx: usize) -> Option<WordCat> {
use crate::tables::word as wd;
if idx > 0 {
let nch = self.string[..idx].chars().next_back().unwrap();
Some(wd::word_category(nch).2)
} else {
None
}
}
}
#[inline]
pub fn new_word_bounds<'b>(s: &'b str) -> UWordBounds<'b> {
UWordBounds {
string: s,
cat: None,
catb: None,
}
}
#[inline]
pub fn new_word_bound_indices<'b>(s: &'b str) -> UWordBoundIndices<'b> {
UWordBoundIndices {
start_offset: s.as_ptr() as usize,
iter: new_word_bounds(s),
}
}
#[inline]
fn has_alphanumeric(s: &&str) -> bool {
use crate::tables::util::is_alphanumeric;
s.chars().any(|c| is_alphanumeric(c))
}
#[inline]
pub fn new_unicode_words<'b>(s: &'b str) -> UnicodeWords<'b> {
use super::UnicodeSegmentation;
UnicodeWords {
inner: s.split_word_bounds().filter(has_alphanumeric),
}
}
#[inline]
pub fn new_unicode_word_indices<'b>(s: &'b str) -> UnicodeWordIndices<'b> {
use super::UnicodeSegmentation;
UnicodeWordIndices {
inner: s
.split_word_bound_indices()
.filter(|(_, c)| has_alphanumeric(c)),
}
}
| 37.552318 | 110 | 0.471254 |
28c94eb4c818d03519f74040f180753a4260b1fe | 5,663 | //! `TcpStream` split support.
//!
//! A `TcpStream` can be split into a `ReadHalf` and a
//! `WriteHalf` with the `TcpStream::split` method. `ReadHalf`
//! implements `AsyncRead` while `WriteHalf` implements `AsyncWrite`.
//!
//! Compared to the generic split of `AsyncRead + AsyncWrite`, this specialized
//! split has no associated overhead and enforces all invariants at the type
//! level.
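//!
//! A minimal usage sketch (the address and payload below are placeholders;
//! this assumes a reachable peer that echoes four bytes back):
//!
//! ```no_run
//! use tokio::io::{AsyncReadExt, AsyncWriteExt};
//! use tokio::net::TcpStream;
//!
//! # async fn sketch() -> std::io::Result<()> {
//! let mut stream = TcpStream::connect("127.0.0.1:8080").await?;
//! // Read and write halves borrow the same stream; no allocation or locking.
//! let (mut rd, mut wr) = stream.split();
//! wr.write_all(b"ping").await?;
//! let mut buf = [0u8; 4];
//! rd.read_exact(&mut buf).await?;
//! # Ok(())
//! # }
//! ```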
use crate::future::poll_fn;
use crate::io::{AsyncRead, AsyncWrite, ReadBuf};
use crate::net::TcpStream;
use std::io;
use std::net::Shutdown;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Borrowed read half of a [`TcpStream`], created by [`split`].
///
/// Reading from a `ReadHalf` is usually done using the convenience methods found on the
/// [`AsyncReadExt`] trait. Examples import this trait through [the prelude].
///
/// [`TcpStream`]: TcpStream
/// [`split`]: TcpStream::split()
/// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt
/// [the prelude]: crate::prelude
#[derive(Debug)]
pub struct ReadHalf<'a>(&'a TcpStream);
/// Borrowed write half of a [`TcpStream`], created by [`split`].
///
/// Note that in the [`AsyncWrite`] implementation of this type, [`poll_shutdown`] will
/// shut down the TCP stream in the write direction.
///
/// Writing to a `WriteHalf` is usually done using the convenience methods found
/// on the [`AsyncWriteExt`] trait. Examples import this trait through [the prelude].
///
/// [`TcpStream`]: TcpStream
/// [`split`]: TcpStream::split()
/// [`AsyncWrite`]: trait@crate::io::AsyncWrite
/// [`poll_shutdown`]: fn@crate::io::AsyncWrite::poll_shutdown
/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt
/// [the prelude]: crate::prelude
#[derive(Debug)]
pub struct WriteHalf<'a>(&'a TcpStream);
pub(crate) fn split(stream: &mut TcpStream) -> (ReadHalf<'_>, WriteHalf<'_>) {
(ReadHalf(&*stream), WriteHalf(&*stream))
}
impl ReadHalf<'_> {
/// Attempt to receive data on the socket, without removing that data from
/// the queue, registering the current task for wakeup if data is not yet
/// available.
///
    /// See the [`TcpStream::poll_peek`] documentation for more details.
///
/// # Examples
///
/// ```no_run
/// use tokio::io;
/// use tokio::net::TcpStream;
///
/// use futures::future::poll_fn;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let mut stream = TcpStream::connect("127.0.0.1:8000").await?;
/// let (mut read_half, _) = stream.split();
/// let mut buf = [0; 10];
///
/// poll_fn(|cx| {
/// read_half.poll_peek(cx, &mut buf)
/// }).await?;
///
/// Ok(())
/// }
/// ```
///
/// [`TcpStream::poll_peek`]: TcpStream::poll_peek
pub fn poll_peek(&mut self, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll<io::Result<usize>> {
self.0.poll_peek(cx, buf)
}
/// Receives data on the socket from the remote address to which it is
/// connected, without removing that data from the queue. On success,
/// returns the number of bytes peeked.
///
    /// See the [`TcpStream::peek`] documentation for more details.
///
/// [`TcpStream::peek`]: TcpStream::peek
///
/// # Examples
///
/// ```no_run
/// use tokio::net::TcpStream;
/// use tokio::prelude::*;
/// use std::error::Error;
///
/// #[tokio::main]
/// async fn main() -> Result<(), Box<dyn Error>> {
/// // Connect to a peer
/// let mut stream = TcpStream::connect("127.0.0.1:8080").await?;
/// let (mut read_half, _) = stream.split();
///
/// let mut b1 = [0; 10];
/// let mut b2 = [0; 10];
///
/// // Peek at the data
/// let n = read_half.peek(&mut b1).await?;
///
/// // Read the data
/// assert_eq!(n, read_half.read(&mut b2[..n]).await?);
/// assert_eq!(&b1[..n], &b2[..n]);
///
/// Ok(())
/// }
/// ```
///
/// The [`read`] method is defined on the [`AsyncReadExt`] trait.
///
/// [`read`]: fn@crate::io::AsyncReadExt::read
/// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt
pub async fn peek(&mut self, buf: &mut [u8]) -> io::Result<usize> {
poll_fn(|cx| self.poll_peek(cx, buf)).await
}
}
impl AsyncRead for ReadHalf<'_> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
self.0.poll_read_priv(cx, buf)
}
}
impl AsyncWrite for WriteHalf<'_> {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
self.0.poll_write_priv(cx, buf)
}
fn poll_write_vectored(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[io::IoSlice<'_>],
) -> Poll<io::Result<usize>> {
self.0.poll_write_vectored_priv(cx, bufs)
}
fn is_write_vectored(&self) -> bool {
self.0.is_write_vectored()
}
#[inline]
fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
// tcp flush is a no-op
Poll::Ready(Ok(()))
}
    // `poll_shutdown` on a write half shuts down the stream in the "write" direction.
fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
self.0.shutdown(Shutdown::Write).into()
}
}
impl AsRef<TcpStream> for ReadHalf<'_> {
fn as_ref(&self) -> &TcpStream {
self.0
}
}
impl AsRef<TcpStream> for WriteHalf<'_> {
fn as_ref(&self) -> &TcpStream {
self.0
}
}
| 30.610811 | 98 | 0.572841 |
5b8be81dba420b0ad90e4f304d3b8a421b880fc5 | 3,884 | use rand::prelude::*;
use std::f32;
use std::sync::atomic::{AtomicUsize, Ordering};
use hitable::{Sphere};
use math::*;
use material::{Material, ScatterRay};
pub struct Scene {
objects: Vec<Sphere>,
pub ray_count: AtomicUsize,
}
impl Scene {
pub fn generate(rng: &mut SmallRng) -> Scene {
let n = 500;
let mut scene = Scene { objects: Vec::with_capacity(n), ray_count: AtomicUsize::new(0) };
scene.objects.push(Sphere{center: vec3f(0.0, -1000.0, 0.0), radius: 1000.0, material: Material::Lambertian{albedo: vec3f(0.5, 0.5, 0.5)}});
for a in -11..11 {
for b in -11..11 {
let choose_mat = rng.gen::<f32>();
let center = vec3f(a as f32 + 0.9*rng.gen::<f32>(), 0.2, b as f32 + 0.9*rng.gen::<f32>());
if (center-vec3f(4.0, 0.2, 0.0)).length() > 0.9 {
if choose_mat < 0.8 {
let albedo_x = rng.gen::<f32>() * rng.gen::<f32>();
let albedo_y = rng.gen::<f32>() * rng.gen::<f32>();
let albedo_z = rng.gen::<f32>() * rng.gen::<f32>();
scene.objects.push(Sphere{center: center, radius: 0.2, material: Material::Lambertian{albedo: vec3f(albedo_x, albedo_y, albedo_z)}});
}
else if choose_mat < 0.95 {
let albedo_x = 0.5*(1.0 + rng.gen::<f32>());
let albedo_y = 0.5*(1.0 + rng.gen::<f32>());
let albedo_z = 0.5*(1.0 + rng.gen::<f32>());
let fuzziness = 0.5*rng.gen::<f32>();
scene.objects.push(Sphere{center: center, radius: 0.2, material: Material::Metal{albedo: vec3f(albedo_x, albedo_y, albedo_z), fuzz: fuzziness}});
}
else {
scene.objects.push(Sphere{center: center, radius: 0.2, material: Material::Dielectric{refraction_index: 1.5}});
}
}
}
}
scene.objects.push(Sphere{center: vec3f(0.0, 1.0, 0.0), radius: 1.0, material: Material::Dielectric{refraction_index: 1.5}});
scene.objects.push(Sphere{center: vec3f(-4.0, 1.0, 0.0), radius: 1.0, material: Material::Lambertian{albedo: vec3f(0.4, 0.2, 0.1)}});
scene.objects.push(Sphere{center: vec3f(4.0, 1.0, 0.0), radius: 1.0, material: Material::Metal{albedo: vec3f(0.7, 0.6, 0.5), fuzz: 0.0}});
return scene;
}
pub fn get_ray_count(&self) -> usize {
self.ray_count.load(Ordering::Relaxed)
}
pub fn ray_trace(&self, ray: &Ray, depth: i32, rng: &mut SmallRng) -> Vec3f {
self.ray_count.fetch_add(1, Ordering::Relaxed);
if depth >= 50 {
return Vec3f::zero();
}
if let Some(scatter) = self.hit(ray, 0.001, f32::MAX, rng) {
return scatter.attenuation * self.ray_trace(&scatter.ray, depth+1, rng);
} else {
let unit_direction = normalize(ray.direction);
let t = 0.5 * (unit_direction.y + 1.0);
return (1.0 - t) * vec3f(1.0, 1.0, 1.0) + (t * Vec3f::new(0.5, 0.7, 1.0));
}
}
fn hit(&self, ray: &Ray, t_min: f32, t_max: f32, rng: &mut SmallRng) -> Option<ScatterRay> {
let mut closest_so_far = t_max;
let mut hit_object: Option<&Sphere> = None;
for object in &self.objects {
if let Some(ray_param) = object.hit(ray, t_min, closest_so_far) {
closest_so_far = ray_param;
hit_object = Some(object);
}
}
if let Some(&object) = hit_object {
let point = ray.point_at_time(closest_so_far);
let normal = (point - object.center) / object.radius;
return object.material.scatter(&ray, point, normal, rng);
}
None
}
}
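// Usage sketch (assumes a `Ray` value built with the `math` module's API;
// `SmallRng::from_entropy` comes from the rand prelude):
//
//   let mut rng = SmallRng::from_entropy();
//   let scene = Scene::generate(&mut rng);
//   let color = scene.ray_trace(&ray, 0, &mut rng);
//   println!("traced {} rays", scene.get_ray_count());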
| 41.763441 | 169 | 0.525232 |
29beabdb2abdf580fc52d5b5078f4b003052f60a | 19,556 | use std::{fmt, env};
use crate::hir::map::definitions::DefPathData;
use crate::mir;
use crate::ty::{self, Ty, layout};
use crate::ty::layout::{Size, Align, LayoutError};
use rustc_target::spec::abi::Abi;
use super::{RawConst, Pointer, InboundsCheck, ScalarMaybeUndef};
use backtrace::Backtrace;
use crate::ty::query::TyCtxtAt;
use errors::DiagnosticBuilder;
use syntax_pos::{Pos, Span};
use syntax::ast;
use syntax::symbol::Symbol;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ErrorHandled {
/// Already reported a lint or an error for this evaluation.
Reported,
    /// Don't emit an error; the evaluation failed because the MIR was generic
    /// and the substs didn't fully monomorphize it.
TooGeneric,
}
impl ErrorHandled {
pub fn assert_reported(self) {
match self {
ErrorHandled::Reported => {},
ErrorHandled::TooGeneric => bug!("MIR interpretation failed without reporting an error \
even though it was fully monomorphized"),
}
}
}
pub type ConstEvalRawResult<'tcx> = Result<RawConst<'tcx>, ErrorHandled>;
pub type ConstEvalResult<'tcx> = Result<ty::Const<'tcx>, ErrorHandled>;
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub struct ConstEvalErr<'tcx> {
pub span: Span,
pub error: crate::mir::interpret::EvalErrorKind<'tcx, u64>,
pub stacktrace: Vec<FrameInfo<'tcx>>,
}
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub struct FrameInfo<'tcx> {
pub call_site: Span, // this span is in the caller!
pub instance: ty::Instance<'tcx>,
pub lint_root: Option<ast::NodeId>,
}
impl<'tcx> fmt::Display for FrameInfo<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ty::tls::with(|tcx| {
if tcx.def_key(self.instance.def_id()).disambiguated_data.data
== DefPathData::ClosureExpr
{
write!(f, "inside call to closure")?;
} else {
write!(f, "inside call to `{}`", self.instance)?;
}
if !self.call_site.is_dummy() {
let lo = tcx.sess.source_map().lookup_char_pos_adj(self.call_site.lo());
write!(f, " at {}:{}:{}", lo.filename, lo.line, lo.col.to_usize() + 1)?;
}
Ok(())
})
}
}
impl<'a, 'gcx, 'tcx> ConstEvalErr<'tcx> {
pub fn struct_error(&self,
tcx: TyCtxtAt<'a, 'gcx, 'tcx>,
message: &str)
-> Result<DiagnosticBuilder<'tcx>, ErrorHandled>
{
self.struct_generic(tcx, message, None)
}
pub fn report_as_error(&self,
tcx: TyCtxtAt<'a, 'gcx, 'tcx>,
message: &str
) -> ErrorHandled {
let err = self.struct_error(tcx, message);
match err {
Ok(mut err) => {
err.emit();
ErrorHandled::Reported
},
Err(err) => err,
}
}
pub fn report_as_lint(&self,
tcx: TyCtxtAt<'a, 'gcx, 'tcx>,
message: &str,
lint_root: ast::NodeId,
) -> ErrorHandled {
let lint = self.struct_generic(
tcx,
message,
Some(lint_root),
);
match lint {
Ok(mut lint) => {
lint.emit();
ErrorHandled::Reported
},
Err(err) => err,
}
}
fn struct_generic(
&self,
tcx: TyCtxtAt<'a, 'gcx, 'tcx>,
message: &str,
lint_root: Option<ast::NodeId>,
) -> Result<DiagnosticBuilder<'tcx>, ErrorHandled> {
match self.error {
EvalErrorKind::Layout(LayoutError::Unknown(_)) |
EvalErrorKind::TooGeneric => return Err(ErrorHandled::TooGeneric),
EvalErrorKind::Layout(LayoutError::SizeOverflow(_)) |
EvalErrorKind::TypeckError => return Err(ErrorHandled::Reported),
_ => {},
}
trace!("reporting const eval failure at {:?}", self.span);
let mut err = if let Some(lint_root) = lint_root {
let node_id = self.stacktrace
.iter()
.rev()
.filter_map(|frame| frame.lint_root)
.next()
.unwrap_or(lint_root);
tcx.struct_span_lint_node(
crate::rustc::lint::builtin::CONST_ERR,
node_id,
tcx.span,
message,
)
} else {
struct_error(tcx, message)
};
err.span_label(self.span, self.error.to_string());
// Skip the last, which is just the environment of the constant. The stacktrace
// is sometimes empty because we create "fake" eval contexts in CTFE to do work
// on constant values.
if self.stacktrace.len() > 0 {
for frame_info in &self.stacktrace[..self.stacktrace.len()-1] {
err.span_label(frame_info.call_site, frame_info.to_string());
}
}
Ok(err)
}
}
pub fn struct_error<'a, 'gcx, 'tcx>(
tcx: TyCtxtAt<'a, 'gcx, 'tcx>,
msg: &str,
) -> DiagnosticBuilder<'tcx> {
struct_span_err!(tcx.sess, tcx.span, E0080, "{}", msg)
}
#[derive(Debug, Clone)]
pub struct EvalError<'tcx> {
pub kind: EvalErrorKind<'tcx, u64>,
pub backtrace: Option<Box<Backtrace>>,
}
impl<'tcx> EvalError<'tcx> {
pub fn print_backtrace(&mut self) {
if let Some(ref mut backtrace) = self.backtrace {
print_backtrace(&mut *backtrace);
}
}
}
fn print_backtrace(backtrace: &mut Backtrace) {
backtrace.resolve();
eprintln!("\n\nAn error occurred in miri:\n{:?}", backtrace);
}
impl<'tcx> From<EvalErrorKind<'tcx, u64>> for EvalError<'tcx> {
fn from(kind: EvalErrorKind<'tcx, u64>) -> Self {
let backtrace = match env::var("RUST_CTFE_BACKTRACE") {
// matching RUST_BACKTRACE, we treat "0" the same as "not present".
Ok(ref val) if val != "0" => {
let mut backtrace = Backtrace::new_unresolved();
if val == "immediate" {
// Print it now
print_backtrace(&mut backtrace);
None
} else {
Some(Box::new(backtrace))
}
},
_ => None,
};
EvalError {
kind,
backtrace,
}
}
}
pub type AssertMessage<'tcx> = EvalErrorKind<'tcx, mir::Operand<'tcx>>;
#[derive(Clone, RustcEncodable, RustcDecodable)]
pub enum EvalErrorKind<'tcx, O> {
/// This variant is used by machines to signal their own errors that do not
/// match an existing variant.
MachineError(String),
FunctionAbiMismatch(Abi, Abi),
FunctionArgMismatch(Ty<'tcx>, Ty<'tcx>),
FunctionRetMismatch(Ty<'tcx>, Ty<'tcx>),
FunctionArgCountMismatch,
NoMirFor(String),
UnterminatedCString(Pointer),
DanglingPointerDeref,
DoubleFree,
InvalidMemoryAccess,
InvalidFunctionPointer,
InvalidBool,
InvalidDiscriminant(ScalarMaybeUndef),
PointerOutOfBounds {
ptr: Pointer,
check: InboundsCheck,
allocation_size: Size,
},
InvalidNullPointerUsage,
ReadPointerAsBytes,
ReadBytesAsPointer,
ReadForeignStatic,
InvalidPointerMath,
ReadUndefBytes(Size),
DeadLocal,
InvalidBoolOp(mir::BinOp),
Unimplemented(String),
DerefFunctionPointer,
ExecuteMemory,
BoundsCheck { len: O, index: O },
Overflow(mir::BinOp),
OverflowNeg,
DivisionByZero,
RemainderByZero,
Intrinsic(String),
InvalidChar(u128),
StackFrameLimitReached,
OutOfTls,
TlsOutOfBounds,
AbiViolation(String),
AlignmentCheckFailed {
required: Align,
has: Align,
},
ValidationFailure(String),
CalledClosureAsFunction,
VtableForArgumentlessMethod,
ModifiedConstantMemory,
ModifiedStatic,
AssumptionNotHeld,
InlineAsm,
TypeNotPrimitive(Ty<'tcx>),
ReallocatedWrongMemoryKind(String, String),
DeallocatedWrongMemoryKind(String, String),
ReallocateNonBasePtr,
DeallocateNonBasePtr,
IncorrectAllocationInformation(Size, Size, Align, Align),
Layout(layout::LayoutError<'tcx>),
HeapAllocZeroBytes,
HeapAllocNonPowerOfTwoAlignment(u64),
Unreachable,
Panic {
msg: Symbol,
line: u32,
col: u32,
file: Symbol,
},
ReadFromReturnPointer,
PathNotFound(Vec<String>),
UnimplementedTraitSelection,
/// Abort in case type errors are reached
TypeckError,
/// Resolution can fail if we are in a too generic context
TooGeneric,
/// Cannot compute this constant because it depends on another one
/// which already produced an error
ReferencedConstant,
GeneratorResumedAfterReturn,
GeneratorResumedAfterPanic,
InfiniteLoop,
}
pub type EvalResult<'tcx, T = ()> = Result<T, EvalError<'tcx>>;
impl<'tcx, O> EvalErrorKind<'tcx, O> {
pub fn description(&self) -> &str {
use self::EvalErrorKind::*;
match *self {
MachineError(ref inner) => inner,
FunctionAbiMismatch(..) | FunctionArgMismatch(..) | FunctionRetMismatch(..)
| FunctionArgCountMismatch =>
"tried to call a function through a function pointer of incompatible type",
InvalidMemoryAccess =>
"tried to access memory through an invalid pointer",
DanglingPointerDeref =>
"dangling pointer was dereferenced",
DoubleFree =>
"tried to deallocate dangling pointer",
InvalidFunctionPointer =>
"tried to use a function pointer after offsetting it",
InvalidBool =>
"invalid boolean value read",
InvalidDiscriminant(..) =>
"invalid enum discriminant value read",
PointerOutOfBounds { .. } =>
"pointer offset outside bounds of allocation",
InvalidNullPointerUsage =>
"invalid use of NULL pointer",
ValidationFailure(..) =>
"type validation failed",
ReadPointerAsBytes =>
"a raw memory access tried to access part of a pointer value as raw bytes",
ReadBytesAsPointer =>
"a memory access tried to interpret some bytes as a pointer",
ReadForeignStatic =>
"tried to read from foreign (extern) static",
InvalidPointerMath =>
"attempted to do invalid arithmetic on pointers that would leak base addresses, \
e.g., comparing pointers into different allocations",
ReadUndefBytes(_) =>
"attempted to read undefined bytes",
DeadLocal =>
"tried to access a dead local variable",
InvalidBoolOp(_) =>
"invalid boolean operation",
Unimplemented(ref msg) => msg,
DerefFunctionPointer =>
"tried to dereference a function pointer",
ExecuteMemory =>
"tried to treat a memory pointer as a function pointer",
BoundsCheck{..} =>
"array index out of bounds",
Intrinsic(..) =>
"intrinsic failed",
NoMirFor(..) =>
"mir not found",
InvalidChar(..) =>
"tried to interpret an invalid 32-bit value as a char",
StackFrameLimitReached =>
"reached the configured maximum number of stack frames",
OutOfTls =>
"reached the maximum number of representable TLS keys",
TlsOutOfBounds =>
"accessed an invalid (unallocated) TLS key",
AbiViolation(ref msg) => msg,
AlignmentCheckFailed{..} =>
"tried to execute a misaligned read or write",
CalledClosureAsFunction =>
"tried to call a closure through a function pointer",
VtableForArgumentlessMethod =>
"tried to call a vtable function without arguments",
ModifiedConstantMemory =>
"tried to modify constant memory",
ModifiedStatic =>
"tried to modify a static's initial value from another static's initializer",
AssumptionNotHeld =>
"`assume` argument was false",
InlineAsm =>
"miri does not support inline assembly",
TypeNotPrimitive(_) =>
"expected primitive type, got nonprimitive",
ReallocatedWrongMemoryKind(_, _) =>
"tried to reallocate memory from one kind to another",
DeallocatedWrongMemoryKind(_, _) =>
"tried to deallocate memory of the wrong kind",
ReallocateNonBasePtr =>
"tried to reallocate with a pointer not to the beginning of an existing object",
DeallocateNonBasePtr =>
"tried to deallocate with a pointer not to the beginning of an existing object",
IncorrectAllocationInformation(..) =>
"tried to deallocate or reallocate using incorrect alignment or size",
Layout(_) =>
"rustc layout computation failed",
UnterminatedCString(_) =>
"attempted to get length of a null terminated string, but no null found before end \
of allocation",
HeapAllocZeroBytes =>
"tried to re-, de- or allocate zero bytes on the heap",
HeapAllocNonPowerOfTwoAlignment(_) =>
"tried to re-, de-, or allocate heap memory with alignment that is not a power of \
two",
Unreachable =>
"entered unreachable code",
Panic { .. } =>
"the evaluated program panicked",
ReadFromReturnPointer =>
"tried to read from the return pointer",
PathNotFound(_) =>
"a path could not be resolved, maybe the crate is not loaded",
UnimplementedTraitSelection =>
"there were unresolved type arguments during trait selection",
TypeckError =>
"encountered constants with type errors, stopping evaluation",
TooGeneric =>
"encountered overly generic constant",
ReferencedConstant =>
"referenced constant has errors",
Overflow(mir::BinOp::Add) => "attempt to add with overflow",
Overflow(mir::BinOp::Sub) => "attempt to subtract with overflow",
Overflow(mir::BinOp::Mul) => "attempt to multiply with overflow",
Overflow(mir::BinOp::Div) => "attempt to divide with overflow",
Overflow(mir::BinOp::Rem) => "attempt to calculate the remainder with overflow",
OverflowNeg => "attempt to negate with overflow",
Overflow(mir::BinOp::Shr) => "attempt to shift right with overflow",
Overflow(mir::BinOp::Shl) => "attempt to shift left with overflow",
Overflow(op) => bug!("{:?} cannot overflow", op),
DivisionByZero => "attempt to divide by zero",
RemainderByZero => "attempt to calculate the remainder with a divisor of zero",
GeneratorResumedAfterReturn => "generator resumed after completion",
GeneratorResumedAfterPanic => "generator resumed after panicking",
InfiniteLoop =>
"duplicate interpreter state observed here, const evaluation will never terminate",
}
}
}
impl<'tcx> fmt::Display for EvalError<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.kind)
}
}
impl<'tcx> fmt::Display for EvalErrorKind<'tcx, u64> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl<'tcx, O: fmt::Debug> fmt::Debug for EvalErrorKind<'tcx, O> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use self::EvalErrorKind::*;
match *self {
PointerOutOfBounds { ptr, check, allocation_size } => {
write!(f, "Pointer must be in-bounds{} at offset {}, but is outside bounds of \
allocation {} which has size {}",
match check {
InboundsCheck::Live => " and live",
InboundsCheck::MaybeDead => "",
},
ptr.offset.bytes(), ptr.alloc_id, allocation_size.bytes())
},
ValidationFailure(ref err) => {
write!(f, "type validation failed: {}", err)
}
NoMirFor(ref func) => write!(f, "no mir for `{}`", func),
FunctionAbiMismatch(caller_abi, callee_abi) =>
write!(f, "tried to call a function with ABI {:?} using caller ABI {:?}",
callee_abi, caller_abi),
FunctionArgMismatch(caller_ty, callee_ty) =>
write!(f, "tried to call a function with argument of type {:?} \
passing data of type {:?}",
callee_ty, caller_ty),
FunctionRetMismatch(caller_ty, callee_ty) =>
write!(f, "tried to call a function with return type {:?} \
passing return place of type {:?}",
callee_ty, caller_ty),
FunctionArgCountMismatch =>
write!(f, "tried to call a function with incorrect number of arguments"),
BoundsCheck { ref len, ref index } =>
write!(f, "index out of bounds: the len is {:?} but the index is {:?}", len, index),
ReallocatedWrongMemoryKind(ref old, ref new) =>
write!(f, "tried to reallocate memory from {} to {}", old, new),
DeallocatedWrongMemoryKind(ref old, ref new) =>
write!(f, "tried to deallocate {} memory but gave {} as the kind", old, new),
Intrinsic(ref err) =>
write!(f, "{}", err),
InvalidChar(c) =>
write!(f, "tried to interpret an invalid 32-bit value as a char: {}", c),
AlignmentCheckFailed { required, has } =>
write!(f, "tried to access memory with alignment {}, but alignment {} is required",
has.bytes(), required.bytes()),
TypeNotPrimitive(ty) =>
write!(f, "expected primitive type, got {}", ty),
Layout(ref err) =>
write!(f, "rustc layout computation failed: {:?}", err),
PathNotFound(ref path) =>
write!(f, "Cannot find path {:?}", path),
MachineError(ref inner) =>
write!(f, "{}", inner),
IncorrectAllocationInformation(size, size2, align, align2) =>
write!(f, "incorrect alloc info: expected size {} and align {}, \
got size {} and align {}",
size.bytes(), align.bytes(), size2.bytes(), align2.bytes()),
Panic { ref msg, line, col, ref file } =>
write!(f, "the evaluated program panicked at '{}', {}:{}:{}", msg, file, line, col),
InvalidDiscriminant(val) =>
write!(f, "encountered invalid enum discriminant {}", val),
_ => write!(f, "{}", self.description()),
}
}
}
| 38.496063 | 100 | 0.563459 |
7503c86c9e0cde32d1ae3666275ee274da3a9b3c | 5,244 | //! Displays spheres with physically based materials.
extern crate amethyst;
extern crate genmesh;
use amethyst::assets::Loader;
use amethyst::core::cgmath::{Deg, Matrix4, Vector3};
use amethyst::core::transform::GlobalTransform;
use amethyst::prelude::*;
use amethyst::renderer::*;
use genmesh::{MapToVertices, Triangulate, Vertices};
use genmesh::generators::SphereUV;
struct Example;
impl State for Example {
fn on_start(&mut self, world: &mut World) {
let mat_defaults = world.read_resource::<MaterialDefaults>().0.clone();
let verts = gen_sphere(32, 32).into();
let albedo = [1.0, 1.0, 1.0, 1.0].into();
println!("Load mesh");
let (mesh, albedo) = {
let loader = world.read_resource::<Loader>();
let meshes = &world.read_resource();
let textures = &world.read_resource();
let mesh: MeshHandle = loader.load_from_data(verts, (), meshes);
let albedo = loader.load_from_data(albedo, (), textures);
(mesh, albedo)
};
println!("Create spheres");
for i in 0..5 {
for j in 0..5 {
let roughness = 1.0f32 * (i as f32 / 4.0f32);
let metallic = 1.0f32 * (j as f32 / 4.0f32);
let pos = Matrix4::from_translation(
[2.0f32 * (i - 2) as f32, 2.0f32 * (j - 2) as f32, 0.0].into(),
);
let metallic = [metallic, metallic, metallic, 1.0].into();
let roughness = [roughness, roughness, roughness, 1.0].into();
let (metallic, roughness) = {
let loader = world.read_resource::<Loader>();
let textures = &world.read_resource();
let metallic = loader.load_from_data(metallic, (), textures);
let roughness = loader.load_from_data(roughness, (), textures);
(metallic, roughness)
};
let mtl = Material {
albedo: albedo.clone(),
metallic,
roughness,
..mat_defaults.clone()
};
world
.create_entity()
.with(GlobalTransform(pos.into()))
.with(mesh.clone())
.with(mtl)
.build();
}
}
println!("Create lights");
let light1: Light = PointLight {
center: [6.0, 6.0, -6.0].into(),
intensity: 6.0,
color: [0.8, 0.0, 0.0].into(),
..PointLight::default()
}.into();
let light2: Light = PointLight {
center: [6.0, -6.0, -6.0].into(),
intensity: 5.0,
color: [0.0, 0.3, 0.7].into(),
..PointLight::default()
}.into();
world.create_entity().with(light1).build();
world.create_entity().with(light2).build();
println!("Put camera");
let transform =
Matrix4::from_translation([0.0, 0.0, -12.0].into()) * Matrix4::from_angle_y(Deg(180.));
world
.create_entity()
.with(Camera::from(Projection::perspective(1.3, Deg(60.0))))
.with(GlobalTransform(transform.into()))
.build();
}
fn handle_event(&mut self, _: &mut World, event: Event) -> Trans {
match event {
Event::WindowEvent { event, .. } => match event {
WindowEvent::KeyboardInput {
input:
KeyboardInput {
virtual_keycode: Some(VirtualKeyCode::Escape),
..
},
..
} => Trans::Quit,
_ => Trans::None,
},
_ => Trans::None,
}
}
}
fn run() -> Result<(), amethyst::Error> {
let path = format!(
"{}/examples/material/resources/display_config.ron",
env!("CARGO_MANIFEST_DIR")
);
let config = DisplayConfig::load(&path);
let resources = format!("{}/examples/assets/", env!("CARGO_MANIFEST_DIR"));
let pipe = Pipeline::build().with_stage(
Stage::with_backbuffer()
.clear_target([0.0, 0.0, 0.0, 1.0], 1.0)
.with_pass(DrawPbm::<PosNormTangTex>::new()),
);
let mut game = Application::build(&resources, Example)?
.with_bundle(RenderBundle::new(pipe, Some(config)))?
.build()?;
game.run();
Ok(())
}
fn main() {
if let Err(e) = run() {
println!("Failed to execute example: {}", e);
::std::process::exit(1);
}
}
fn gen_sphere(u: usize, v: usize) -> Vec<PosNormTangTex> {
SphereUV::new(u, v)
.vertex(|vertex| {
let normal = Vector3::from(vertex.normal);
let up = Vector3::from([0.0, 1.0, 0.0]);
let tangent = normal.cross(up).cross(normal);
PosNormTangTex {
position: vertex.pos,
normal: vertex.normal,
tangent: tangent.into(),
tex_coord: [0.1, 0.1],
}
})
.triangulate()
.vertices()
.collect()
}
| 31.781818 | 99 | 0.492182 |
d5b1ed2ffacf2b16896f349899bb2fd49f194014 | 1,801 | #[doc = "Writer for register TASKS_STARTTX"]
pub type W = crate::W<u32, super::TASKS_STARTTX>;
#[doc = "Register TASKS_STARTTX `reset()`'s with value 0"]
impl crate::ResetValue for super::TASKS_STARTTX {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Start TWI transmit sequence\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TASKS_STARTTX_AW {
#[doc = "1: Trigger task"]
TRIGGER,
}
impl From<TASKS_STARTTX_AW> for bool {
#[inline(always)]
fn from(variant: TASKS_STARTTX_AW) -> Self {
match variant {
TASKS_STARTTX_AW::TRIGGER => true,
}
}
}
#[doc = "Write proxy for field `TASKS_STARTTX`"]
pub struct TASKS_STARTTX_W<'a> {
w: &'a mut W,
}
impl<'a> TASKS_STARTTX_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: TASKS_STARTTX_AW) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Trigger task"]
#[inline(always)]
pub fn trigger(self) -> &'a mut W {
self.variant(TASKS_STARTTX_AW::TRIGGER)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
impl W {
#[doc = "Bit 0 - Start TWI transmit sequence"]
#[inline(always)]
pub fn tasks_starttx(&mut self) -> TASKS_STARTTX_W {
TASKS_STARTTX_W { w: self }
}
}
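// Usage sketch (assumes `twi` is an instance of the peripheral that owns this
// register, following the usual svd2rust write API):
//
//   twi.tasks_starttx.write(|w| w.tasks_starttx().trigger());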
| 27.287879 | 70 | 0.575791 |
ef9e232cb6e69380fbdb22a7336a4b35f97ae5fa | 13,399 | pub mod configuration_manager;
pub mod protocol;
use serde::{
de::{self, Visitor},
Deserialize, Deserializer, Serialize, Serializer,
};
use std::{
fmt::{self, Debug},
str::FromStr,
string::ToString,
sync::Arc,
};
use crate::{
core::{
errors::ButtplugError,
messages::{
self, ButtplugDeviceCommandMessageUnion, ButtplugServerMessage, DeviceMessageAttributesMap,
RawReadCmd, RawReading, RawSubscribeCmd, RawUnsubscribeCmd, RawWriteCmd,
},
ButtplugResultFuture,
},
device::{
configuration_manager::{DeviceConfigurationManager, DeviceSpecifier, ProtocolDefinition},
protocol::ButtplugProtocol,
},
};
use async_trait::async_trait;
use configuration_manager::DeviceProtocolConfiguration;
use core::hash::{Hash, Hasher};
use futures::future::BoxFuture;
use tokio::sync::broadcast;
// We need this enum to be exposed in our WASM FFI, but the only way to do that
// is to expose it at the declaration level. Therefore, we use the WASM feature
// to assume we're building for WASM and attach our bindgen. The serde
// de/serialization is taken care of at the FFI level.
#[derive(EnumString, Clone, Debug, PartialEq, Eq, Hash, Display, Copy)]
#[strum(serialize_all = "lowercase")]
pub enum Endpoint {
Command,
Firmware,
Rx,
RxAccel,
RxBLEBattery,
RxPressure,
RxTouch,
Tx,
TxMode,
TxShock,
TxVibrate,
TxVendorControl,
Whitelist,
Generic0,
Generic1,
Generic2,
Generic3,
Generic4,
Generic5,
Generic6,
Generic7,
Generic8,
Generic9,
Generic10,
Generic11,
Generic12,
Generic13,
Generic14,
Generic15,
Generic16,
Generic17,
Generic18,
Generic19,
Generic20,
Generic21,
Generic22,
Generic23,
Generic24,
Generic25,
Generic26,
Generic27,
Generic28,
Generic29,
Generic30,
Generic31,
}
impl Serialize for Endpoint {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
struct EndpointVisitor;
impl<'de> Visitor<'de> for EndpointVisitor {
type Value = Endpoint;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a string representing an endpoint")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
Endpoint::from_str(value).map_err(|e| E::custom(format!("{}", e)))
}
}
impl<'de> Deserialize<'de> for Endpoint {
fn deserialize<D>(deserializer: D) -> Result<Endpoint, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_str(EndpointVisitor)
}
}
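// Round-trip sketch for the serde impls above (assumes `serde_json` is
// available; strum's `serialize_all = "lowercase"` yields e.g. "txvibrate"):
//
//   let ep: Endpoint = serde_json::from_str("\"txvibrate\"").unwrap();
//   assert_eq!(ep, Endpoint::TxVibrate);
//   assert_eq!(serde_json::to_string(&ep).unwrap(), "\"txvibrate\"");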
pub type ButtplugDeviceResultFuture =
BoxFuture<'static, Result<ButtplugServerMessage, ButtplugError>>;
#[derive(PartialEq, Debug)]
pub struct DeviceReadCmd {
pub endpoint: Endpoint,
pub length: u32,
pub timeout_ms: u32,
}
impl DeviceReadCmd {
pub fn new(endpoint: Endpoint, length: u32, timeout_ms: u32) -> Self {
Self {
endpoint,
length,
timeout_ms,
}
}
}
impl From<RawReadCmd> for DeviceReadCmd {
fn from(msg: RawReadCmd) -> Self {
Self {
endpoint: msg.endpoint(),
length: msg.expected_length(),
timeout_ms: msg.timeout(),
}
}
}
#[derive(PartialEq, Debug)]
pub struct DeviceWriteCmd {
pub endpoint: Endpoint,
pub data: Vec<u8>,
pub write_with_response: bool,
}
impl DeviceWriteCmd {
pub fn new(endpoint: Endpoint, data: Vec<u8>, write_with_response: bool) -> Self {
Self {
endpoint,
data,
write_with_response,
}
}
}
impl From<RawWriteCmd> for DeviceWriteCmd {
fn from(msg: RawWriteCmd) -> Self {
Self {
endpoint: msg.endpoint(),
data: msg.data().clone(),
write_with_response: msg.write_with_response(),
}
}
}
#[derive(PartialEq, Debug)]
pub struct DeviceSubscribeCmd {
pub endpoint: Endpoint,
}
impl DeviceSubscribeCmd {
pub fn new(endpoint: Endpoint) -> Self {
Self { endpoint }
}
}
impl From<RawSubscribeCmd> for DeviceSubscribeCmd {
fn from(msg: RawSubscribeCmd) -> Self {
Self {
endpoint: msg.endpoint(),
}
}
}
#[derive(PartialEq, Debug)]
pub struct DeviceUnsubscribeCmd {
pub endpoint: Endpoint,
}
impl DeviceUnsubscribeCmd {
pub fn new(endpoint: Endpoint) -> Self {
Self { endpoint }
}
}
impl From<RawUnsubscribeCmd> for DeviceUnsubscribeCmd {
fn from(msg: RawUnsubscribeCmd) -> Self {
Self {
endpoint: msg.endpoint(),
}
}
}
#[derive(PartialEq, Debug)]
pub enum DeviceImplCommand {
// Endpoint, data, write with response
Write(DeviceWriteCmd),
// Endpoint, length, timeout in ms
Read(DeviceReadCmd),
Subscribe(DeviceSubscribeCmd),
Unsubscribe(DeviceUnsubscribeCmd),
}
impl From<RawWriteCmd> for DeviceImplCommand {
fn from(msg: RawWriteCmd) -> Self {
DeviceImplCommand::Write(msg.into())
}
}
impl From<RawSubscribeCmd> for DeviceImplCommand {
fn from(msg: RawSubscribeCmd) -> Self {
DeviceImplCommand::Subscribe(msg.into())
}
}
impl From<RawUnsubscribeCmd> for DeviceImplCommand {
fn from(msg: RawUnsubscribeCmd) -> Self {
DeviceImplCommand::Unsubscribe(msg.into())
}
}
impl From<DeviceReadCmd> for DeviceImplCommand {
fn from(msg: DeviceReadCmd) -> Self {
DeviceImplCommand::Read(msg)
}
}
impl From<DeviceWriteCmd> for DeviceImplCommand {
fn from(msg: DeviceWriteCmd) -> Self {
DeviceImplCommand::Write(msg)
}
}
impl From<DeviceSubscribeCmd> for DeviceImplCommand {
fn from(msg: DeviceSubscribeCmd) -> Self {
DeviceImplCommand::Subscribe(msg)
}
}
impl From<DeviceUnsubscribeCmd> for DeviceImplCommand {
fn from(msg: DeviceUnsubscribeCmd) -> Self {
DeviceImplCommand::Unsubscribe(msg)
}
}
#[derive(Debug)]
pub struct ButtplugDeviceImplInfo {
pub endpoints: Vec<Endpoint>,
pub manufacturer_name: Option<String>,
pub product_name: Option<String>,
pub serial_number: Option<String>,
}
#[derive(Debug)]
pub enum ButtplugDeviceCommand {
Connect,
Message(DeviceImplCommand),
Disconnect,
}
// TODO: Split this into connection-related returns and other returns.
#[derive(Debug)]
pub enum ButtplugDeviceReturn {
Connected(ButtplugDeviceImplInfo),
Ok(messages::Ok),
RawReading(messages::RawReading),
Error(ButtplugError),
}
#[derive(Debug, Clone)]
pub enum ButtplugDeviceEvent {
Connected(Arc<ButtplugDevice>),
Notification(String, Endpoint, Vec<u8>),
Removed(String),
}
pub struct DeviceImpl {
name: String,
address: String,
endpoints: Vec<Endpoint>,
internal_impl: Box<dyn DeviceImplInternal>,
}
impl DeviceImpl {
pub fn new(
name: &str,
address: &str,
endpoints: &[Endpoint],
internal_impl: Box<dyn DeviceImplInternal>,
) -> Self {
Self {
name: name.to_owned(),
address: address.to_owned(),
endpoints: endpoints.into(),
internal_impl,
}
}
pub fn name(&self) -> &str {
&self.name
}
pub fn address(&self) -> &str {
&self.address
}
pub fn connected(&self) -> bool {
self.internal_impl.connected()
}
pub fn event_stream(&self) -> broadcast::Receiver<ButtplugDeviceEvent> {
self.internal_impl.event_stream()
}
pub fn endpoints(&self) -> Vec<Endpoint> {
self.endpoints.clone()
}
pub fn disconnect(&self) -> ButtplugResultFuture {
self.internal_impl.disconnect()
}
pub fn read_value(
&self,
msg: DeviceReadCmd,
) -> BoxFuture<'static, Result<RawReading, ButtplugError>> {
self.internal_impl.read_value(msg)
}
pub fn write_value(&self, msg: DeviceWriteCmd) -> ButtplugResultFuture {
self.internal_impl.write_value(msg)
}
pub fn subscribe(&self, msg: DeviceSubscribeCmd) -> ButtplugResultFuture {
self.internal_impl.subscribe(msg)
}
pub fn unsubscribe(&self, msg: DeviceUnsubscribeCmd) -> ButtplugResultFuture {
self.internal_impl.unsubscribe(msg)
}
}
pub trait DeviceImplInternal: Sync + Send {
fn connected(&self) -> bool;
fn disconnect(&self) -> ButtplugResultFuture;
// Ugh. Don't want to have to pass these around internally, but don't have a
// better solution yet.
fn event_stream(&self) -> broadcast::Receiver<ButtplugDeviceEvent>;
fn read_value(&self, msg: DeviceReadCmd)
-> BoxFuture<'static, Result<RawReading, ButtplugError>>;
fn write_value(&self, msg: DeviceWriteCmd) -> ButtplugResultFuture;
fn subscribe(&self, msg: DeviceSubscribeCmd) -> ButtplugResultFuture;
fn unsubscribe(&self, msg: DeviceUnsubscribeCmd) -> ButtplugResultFuture;
}
#[async_trait]
pub trait ButtplugDeviceImplCreator: Sync + Send + Debug {
fn get_specifier(&self) -> DeviceSpecifier;
async fn try_create_device_impl(
&mut self,
protocol: ProtocolDefinition,
) -> Result<DeviceImpl, ButtplugError>;
}
pub struct ButtplugDevice {
protocol: Box<dyn ButtplugProtocol>,
device: Arc<DeviceImpl>,
}
impl Debug for ButtplugDevice {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ButtplugDevice")
.field("name", &self.name())
.field("address", &self.address())
.finish()
}
}
impl Hash for ButtplugDevice {
fn hash<H: Hasher>(&self, state: &mut H) {
self.device.address().hash(state);
}
}
impl Eq for ButtplugDevice {}
impl PartialEq for ButtplugDevice {
fn eq(&self, other: &Self) -> bool {
self.device.address() == other.device.address()
}
}
impl ButtplugDevice {
pub fn new(protocol: Box<dyn ButtplugProtocol>, device: Arc<DeviceImpl>) -> Self {
Self { protocol, device }
}
pub fn address(&self) -> &str {
self.device.address()
}
pub async fn try_create_device(
device_config_mgr: Arc<DeviceConfigurationManager>,
mut device_creator: Box<dyn ButtplugDeviceImplCreator>,
) -> Result<Option<ButtplugDevice>, ButtplugError> {
// First off, we need to see if we even have a configuration available
// for the device we're trying to create. If we don't, return Ok(None),
// because this isn't actually an error. However, if we *do* have a
// configuration but something goes wrong after this, then it's an
// error.
match device_config_mgr.find_configuration(&device_creator.get_specifier()) {
Some((allow_raw_messages, config_name, config)) => {
// Now that we have both a possible device implementation and a
// configuration for that device, try to initialize the implementation.
// This usually means trying to connect to whatever the device is,
// finding endpoints, etc.
let device_protocol_config = DeviceProtocolConfiguration::new(
allow_raw_messages,
config.defaults.clone(),
config.configurations.clone(),
);
// TODO Should we even return a config from the device_config_mgr if the
// protocol isn't there?
if device_config_mgr.has_protocol(&*config_name) {
match device_creator.try_create_device_impl(config).await {
Ok(device_impl) => {
info!(
address = tracing::field::display(device_impl.address()),
"Found Buttplug Device {}",
device_impl.name()
);
// If we've made it this far, we now have a connected device
// implementation with endpoints set up. We now need to run whatever
// protocol initialization might need to happen. We'll fetch a protocol
// creator, pass the device implementation to it, then let it do
// whatever it needs. For most protocols, this is a no-op. However, for
// devices like Lovense, some Kiiroo, etc, this can get fairly
// complicated.
let sharable_device_impl = Arc::new(device_impl);
match device_config_mgr.get_protocol_creator(&*config_name)(sharable_device_impl.clone(), device_protocol_config).await
{
Ok(protocol_impl) => Ok(Some(ButtplugDevice::new(
protocol_impl,
sharable_device_impl,
))),
Err(e) => Err(e),
}
}
Err(e) => Err(e),
}
} else {
info!("Protocol {} not available", config_name);
Ok(None)
}
}
None => Ok(None),
}
}
pub fn name(&self) -> String {
// Instead of checking for raw messages at the protocol level, add the raw
// call here, since this is the only way to access devices in the library
// anyways.
//
// Having raw turned on means it'll work for read/write/sub/unsub on any
// endpoint so just use an arbitrary message here to check.
if self
.protocol
.supports_message(&ButtplugDeviceCommandMessageUnion::RawSubscribeCmd(
RawSubscribeCmd::new(1, Endpoint::Tx),
))
.is_ok()
{
format!("{} (Raw)", self.protocol.name())
} else {
self.protocol.name().to_owned()
}
}
pub fn disconnect(&self) -> ButtplugResultFuture {
self.device.disconnect()
}
pub fn message_attributes(&self) -> DeviceMessageAttributesMap {
self.protocol.message_attributes()
}
pub fn parse_message(
&self,
message: ButtplugDeviceCommandMessageUnion,
) -> ButtplugDeviceResultFuture {
self.protocol.handle_command(self.device.clone(), message)
}
pub fn event_stream(&self) -> broadcast::Receiver<ButtplugDeviceEvent> {
self.device.event_stream()
}
// TODO Handle raw messages here.
}
| 25.521905 | 133 | 0.670871 |
39cabaa7930897027c6dbf27c6cfdb715f96c36a | 2,298 | #[derive(Debug, Clone)]
pub enum Expr {
Literal(Literal),
This(SourceLocation),
Unary(UnaryOp, Box<Expr>),
Binary(Box<Expr>, BinaryOp, Box<Expr>),
Call(Box<Expr>, SourceLocation, Vec<Expr>),
Get(Box<Expr>, Symbol),
Grouping(Box<Expr>),
Variable(Symbol),
Assign(Symbol, Box<Expr>),
Logical(Box<Expr>, LogicalOp, Box<Expr>),
Set(Box<Expr>, Symbol, Box<Expr>),
Super(SourceLocation, Symbol),
List(Vec<Expr>),
Subscript {
value: Box<Expr>,
slice: Box<Expr>,
source_location: SourceLocation,
},
SetItem {
lhs: Box<Expr>,
slice: Box<Expr>,
rhs: Box<Expr>,
source_location: SourceLocation,
},
Lambda(LambdaDecl),
}
#[derive(Debug, Clone, Copy)]
pub struct SourceLocation {
pub line: usize,
pub col: i64,
}
#[derive(Debug, Clone)]
pub enum LogicalOp {
Or,
And,
}
#[derive(Debug, Eq, PartialEq, Hash, Clone)]
pub struct Symbol {
pub name: String,
pub line: usize,
pub col: i64,
}
#[derive(Debug, Clone)]
pub struct FunDecl {
pub name: Symbol,
pub params: Vec<Symbol>,
pub body: Vec<Stmt>,
}
#[derive(Debug, Clone)]
pub struct LambdaDecl {
pub params: Vec<Symbol>,
pub body: Vec<Stmt>,
}
#[derive(Debug, Clone)]
pub struct ClassDecl {
pub name: Symbol,
pub superclass: Option<Symbol>,
pub methods: Vec<FunDecl>,
}
#[derive(Debug, Clone)]
pub enum Stmt {
Expr(Expr),
FunDecl(FunDecl),
ClassDecl(ClassDecl),
If(Expr, Box<Stmt>, Option<Box<Stmt>>),
Print(Expr),
VarDecl(Symbol, Option<Expr>),
Block(Vec<Stmt>),
Return(SourceLocation, Option<Expr>),
While(Expr, Box<Stmt>),
}
#[derive(Debug, Copy, Clone)]
pub enum UnaryOpTy {
Minus,
Bang,
}
#[derive(Debug, Copy, Clone)]
pub struct UnaryOp {
pub ty: UnaryOpTy,
pub line: usize,
pub col: i64,
}
#[derive(Debug, Copy, Clone)]
pub enum BinaryOpTy {
EqualEqual,
NotEqual,
Less,
LessEqual,
Greater,
GreaterEqual,
Plus,
Minus,
Star,
Slash,
}
#[derive(Debug, Copy, Clone)]
pub struct BinaryOp {
pub ty: BinaryOpTy,
pub line: usize,
pub col: i64,
}
#[derive(Debug, Clone)]
pub enum Literal {
Number(f64),
String(String),
True,
False,
Nil,
}
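// Sketch: the AST for `1 + 2` built by hand from the types above (line/col
// are dummy values):
//
//   let one_plus_two = Expr::Binary(
//       Box::new(Expr::Literal(Literal::Number(1.0))),
//       BinaryOp { ty: BinaryOpTy::Plus, line: 1, col: 1 },
//       Box::new(Expr::Literal(Literal::Number(2.0))),
//   );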
| 18.532258 | 47 | 0.606614 |
6471d8d3a13a22d963a0753cb5d2f0efb7af833a | 3,668 | use serde::Deserialize;
use yew::{
format::{Json, Nothing},
prelude::*,
services::fetch::{FetchService, FetchTask, Request, Response},
};
#[derive(Clone, Debug, PartialEq, Properties)]
pub struct Props {
pub marca: String,
}
use crate::{
switch::{AppAnchor, AppRoute},
};
#[derive(Deserialize, Debug, Clone)]
pub struct Struture
{
link: String
}
#[derive(Debug)]
pub enum Msg {
GetInfo,
ReceiveResponse(Result<Struture, anyhow::Error>),
}
#[derive(Debug)]
pub struct LoadInfo {
props: Props,
toggle_view: bool,
fetch_task: Option<FetchTask>,
json: Option<Struture>,
link: ComponentLink<Self>,
error: Option<String>,
}
impl LoadInfo {
fn view_json(&self) -> Html {
match self.json {
Some(ref content) => {
html!{
<>
<img src=content.link.clone() style="width: 300px"/>
</>
}
}
None => {
html! {}
}
}
}
    fn view_fetching(&self) -> Html {
        // Placeholder: a loading indicator could be rendered here while the
        // request is in flight (`self.fetch_task.is_some()`); for now both
        // branches render nothing.
        if self.fetch_task.is_some() {
            html! {}
        } else {
            html! {}
        }
    }
fn view_error(&self) -> Html {
if let Some(ref error) = self.error {
html! {<p>{ error.clone() }</p>}
} else {
html! {}
}
}
}
impl Component for LoadInfo {
type Message = Msg;
type Properties = Props;
fn create(props: Self::Properties, link: ComponentLink<Self>) -> Self {
let callback = link.callback(|_msg: Msg| Msg::GetInfo);
callback.emit(Msg::GetInfo);
Self {
props,
toggle_view: false,
fetch_task: None,
json: None,
link,
error: None,
}
}
fn change(&mut self, _props: Self::Properties) -> ShouldRender {
false
}
fn update(&mut self, msg: Self::Message) -> ShouldRender {
use Msg::*;
match msg {
GetInfo => {
self.toggle_view = !self.toggle_view;
let request = Request::get(format!("https://unisulma-ti-default-rtdb.firebaseio.com/info/marcas/{}/0/.json", self.props.marca.clone()))
.body(Nothing)
.expect("Não foi possível efetuar o request.");
let callback =
self.link
.callback(|response: Response<Json<Result<Struture, anyhow::Error>>>| {
let Json(data) = response.into_body();
Msg::ReceiveResponse(data)
});
                let task = FetchService::fetch(request, callback).expect("Failed to start the request");
self.fetch_task = Some(task);
true
}
ReceiveResponse(response) => {
match response {
Ok(dados) => {
self.json = Some(dados);
}
Err(error) => {
self.error = Some(error.to_string())
}
}
self.fetch_task = None;
true
}
}
}
fn view(&self) -> Html {
html! {
<>
{ self.view_fetching() }
{ self.view_json() }
{ self.view_error() }
</>
}
}
} | 27.787879 | 152 | 0.442203 |
e5980369d5119f65d453a92d7da2259dac4ea1a4 | 4,534 | //! Get filesystem statistics
//!
//! See [the man pages](http://pubs.opengroup.org/onlinepubs/9699919799/functions/fstatvfs.html)
//! for more details.
use std::mem;
use std::os::unix::io::AsRawFd;
use libc::{self, c_ulong};
use {Result, NixPath};
use errno::Errno;
libc_bitflags!(
/// File system mount Flags
#[repr(C)]
#[derive(Default)]
pub struct FsFlags: c_ulong {
/// Read Only
ST_RDONLY;
/// Do not allow the set-uid bits to have an effect
ST_NOSUID;
/// Do not interpret character or block-special devices
#[cfg(any(target_os = "android", target_os = "linux"))]
ST_NODEV;
/// Do not allow execution of binaries on the filesystem
#[cfg(any(target_os = "android", target_os = "linux"))]
ST_NOEXEC;
/// All IO should be done synchronously
#[cfg(any(target_os = "android", target_os = "linux"))]
ST_SYNCHRONOUS;
/// Allow mandatory locks on the filesystem
#[cfg(any(target_os = "android", target_os = "linux"))]
ST_MANDLOCK;
/// Write on file/directory/symlink
#[cfg(target_os = "linux")]
ST_WRITE;
/// Append-only file
#[cfg(target_os = "linux")]
ST_APPEND;
/// Immutable file
#[cfg(target_os = "linux")]
ST_IMMUTABLE;
/// Do not update access times on files
#[cfg(any(target_os = "android", target_os = "linux"))]
ST_NOATIME;
        /// Do not update access times on directories
#[cfg(any(target_os = "android", target_os = "linux"))]
ST_NODIRATIME;
/// Update access time relative to modify/change time
#[cfg(any(target_os = "android", all(target_os = "linux", not(target_env = "musl"))))]
ST_RELATIME;
}
);
/// Wrapper around the POSIX `statvfs` struct
///
/// For more information see the [`statvfs(3)` man pages](http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/sys_statvfs.h.html).
// FIXME: Replace with repr(transparent)
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct Statvfs(libc::statvfs);
impl Statvfs {
    /// Get the file system block size
pub fn block_size(&self) -> c_ulong {
self.0.f_bsize
}
/// Get the fundamental file system block size
pub fn fragment_size(&self) -> c_ulong {
self.0.f_frsize
}
/// Get the number of blocks.
///
/// Units are in units of `fragment_size()`
pub fn blocks(&self) -> libc::fsblkcnt_t {
self.0.f_blocks
}
/// Get the number of free blocks in the file system
pub fn blocks_free(&self) -> libc::fsblkcnt_t {
self.0.f_bfree
}
/// Get the number of free blocks for unprivileged users
pub fn blocks_available(&self) -> libc::fsblkcnt_t {
self.0.f_bavail
}
/// Get the total number of file inodes
pub fn files(&self) -> libc::fsfilcnt_t {
self.0.f_files
}
/// Get the number of free file inodes
pub fn files_free(&self) -> libc::fsfilcnt_t {
self.0.f_ffree
}
/// Get the number of free file inodes for unprivileged users
pub fn files_available(&self) -> libc::fsfilcnt_t {
self.0.f_favail
}
/// Get the file system id
pub fn filesystem_id(&self) -> c_ulong {
self.0.f_fsid
}
/// Get the mount flags
pub fn flags(&self) -> FsFlags {
FsFlags::from_bits_truncate(self.0.f_flag)
}
/// Get the maximum filename length
pub fn name_max(&self) -> c_ulong {
self.0.f_namemax
}
}
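// Sketch: bytes available to unprivileged users, using the `statvfs` helper
// below. Note the block counts are in units of `fragment_size()`, not
// `block_size()`:
//
//   let stat = statvfs("/".as_bytes())?;
//   let avail = stat.blocks_available() as u64 * stat.fragment_size() as u64;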
/// Return a `Statvfs` object with information about the `path`
pub fn statvfs<P: ?Sized + NixPath>(path: &P) -> Result<Statvfs> {
    unsafe {
        Errno::clear();
        // `MaybeUninit` avoids the undefined behavior of materializing an
        // uninitialized struct via the deprecated `mem::uninitialized`.
        let mut stat = mem::MaybeUninit::<libc::statvfs>::uninit();
        let res = path.with_nix_path(|path|
            libc::statvfs(path.as_ptr(), stat.as_mut_ptr())
        )?;
        Errno::result(res).map(|_| Statvfs(stat.assume_init()))
    }
}
/// Return a `Statvfs` object with information about `fd`
pub fn fstatvfs<T: AsRawFd>(fd: &T) -> Result<Statvfs> {
    unsafe {
        Errno::clear();
        let mut stat = mem::MaybeUninit::<libc::statvfs>::uninit();
        Errno::result(libc::fstatvfs(fd.as_raw_fd(), stat.as_mut_ptr()))
            .map(|_| Statvfs(stat.assume_init()))
    }
}
#[cfg(test)]
mod test {
use std::fs::File;
use sys::statvfs::*;
#[test]
fn statvfs_call() {
statvfs("/".as_bytes()).unwrap();
}
#[test]
fn fstatvfs_call() {
let root = File::open("/").unwrap();
fstatvfs(&root).unwrap();
}
}
| 28.161491 | 135 | 0.593075 |
ed08b96c1e8e2ea43b553e0d93f8f12dd547c2de | 5,223 | use std::path::PathBuf;
use ropey::Rope;
use crate::{
history::{Edit, History},
marks::MarkSet,
};
/// A path for an open text buffer.
///
/// This indicates where the text data of the buffer came from, and
/// where it should be saved to.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum BufferPath {
File(PathBuf), // A buffer for a normal file on disk.
Temp(usize), // A temporary buffer, with a number ID.
}
/// An open text buffer, currently being edited.
#[derive(Debug, Clone)]
pub struct Buffer {
pub path: BufferPath,
    pub is_dirty: bool, // Whether this buffer is currently out of sync with disk.
pub text: Rope, // The actual text content.
pub mark_sets: Vec<MarkSet>, // MarkSets for cursors, view positions, etc.
history: History,
}
impl Buffer {
pub fn new(text: Rope, path: BufferPath) -> Buffer {
Buffer {
path: path,
is_dirty: false,
text: text,
mark_sets: Vec::new(),
history: History::new(),
}
}
/// Replaces the given range of chars with the given text.
///
/// The range does not have to be ordered (i.e. the first component can be
/// greater than the second).
pub fn edit(&mut self, char_idx_range: (usize, usize), text: &str) {
self.is_dirty = true;
// Get the range, properly ordered.
let (start, end) = if char_idx_range.0 < char_idx_range.1 {
(char_idx_range.0, char_idx_range.1)
} else {
(char_idx_range.1, char_idx_range.0)
};
// Update undo stack.
if char_idx_range.0 == char_idx_range.1 {
// Fast-path for insertion-only edits.
self.history.push_edit(Edit {
char_idx: start,
from: String::new(),
to: text.into(),
});
} else {
self.history.push_edit(Edit {
char_idx: start,
from: self.text.slice(start..end).into(),
to: text.into(),
});
}
// Update mark sets.
let post_len = text.chars().count();
for mark_set in self.mark_sets.iter_mut() {
for mark in mark_set.iter_mut() {
*mark = mark.edit((start, end), post_len);
}
mark_set.make_consistent();
}
// Do removal if needed.
if start != end {
self.text.remove(start..end);
}
// Do insertion if needed.
if !text.is_empty() {
self.text.insert(start, text);
}
}
/// Un-does the last edit if there is one, and returns the range of the
/// edited characters which can be used for e.g. placing a cursor or moving
/// the view.
///
/// Returns None if there is no edit to undo.
pub fn undo(&mut self) -> Option<(usize, usize)> {
if let Some(ed) = self.history.undo() {
self.is_dirty = true;
let pre_len = ed.to.chars().count();
let post_len = ed.from.chars().count();
let (start, end) = (ed.char_idx, ed.char_idx + pre_len);
// Update mark sets.
for mark_set in self.mark_sets.iter_mut() {
for mark in mark_set.iter_mut() {
*mark = mark.edit((start, end), post_len);
}
mark_set.make_consistent();
}
// Do removal if needed.
if start != end {
self.text.remove(start..end);
}
// Do insertion if needed.
if !ed.from.is_empty() {
self.text.insert(start, &ed.from);
}
return Some((start, start + post_len));
} else {
return None;
}
}
/// Re-does the last edit if there is one, and returns the range of the
/// edited characters which can be used for e.g. placing a cursor or moving
/// the view.
///
/// Returns None if there is no edit to redo.
pub fn redo(&mut self) -> Option<(usize, usize)> {
if let Some(ed) = self.history.redo() {
self.is_dirty = true;
let pre_len = ed.from.chars().count();
let post_len = ed.to.chars().count();
let (start, end) = (ed.char_idx, ed.char_idx + pre_len);
// Update mark sets.
for mark_set in self.mark_sets.iter_mut() {
for mark in mark_set.iter_mut() {
*mark = mark.edit((start, end), post_len);
}
mark_set.make_consistent();
}
// Do removal if needed.
if start != end {
self.text.remove(start..end);
}
// Do insertion if needed.
if !ed.to.is_empty() {
self.text.insert(start, &ed.to);
}
return Some((start, start + post_len));
} else {
return None;
}
}
/// Creates a new empty mark set, and returns the set index.
pub fn add_mark_set(&mut self) -> usize {
self.mark_sets.push(MarkSet::new());
return self.mark_sets.len() - 1;
}
}
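// Usage sketch (assumes ropey's `Rope::from_str`):
//
//   let mut buf = Buffer::new(Rope::from_str("hello"), BufferPath::Temp(0));
//   buf.edit((0, 5), "goodbye");           // text is now "goodbye"
//   assert_eq!(buf.undo(), Some((0, 5)));  // text is back to "hello"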
| 30.017241 | 83 | 0.518859 |
1e0aeada313dea31ff3a6a59da6ea4d78c7e7a8e | 609 | // Copyright 2018 Steven Bosnick
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE-2.0 or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms
#![crate_type = "lib"]
extern crate luther;
#[macro_use]
extern crate luther_derive;
#[derive(Lexer, Debug)]
pub enum Token {
#[luther(regex = "ab")] Ab,
#[luther(regex = "acc*", priority_group = "2")] Acc,
#[luther(regex = "a(bc|de)")] Abcde(String),
}
| 27.681818 | 72 | 0.683087 |
ff09afa42cce529081c9765bd2a95ca7fc753423 | 6,582 | #![cfg(target_os = "macos")]
use std::os::raw::c_void;
use crate::dpi::LogicalSize;
use crate::monitor::MonitorHandle;
use crate::window::{Window, WindowBuilder};
/// Additional methods on `Window` that are specific to MacOS.
pub trait WindowExtMacOS {
/// Returns a pointer to the cocoa `NSWindow` that is used by this window.
///
/// The pointer will become invalid when the `Window` is destroyed.
fn get_nswindow(&self) -> *mut c_void;
/// Returns a pointer to the cocoa `NSView` that is used by this window.
///
/// The pointer will become invalid when the `Window` is destroyed.
fn get_nsview(&self) -> *mut c_void;
/// Request user attention, causing the application's dock icon to bounce.
/// Note that this has no effect if the application is already focused.
///
/// The `is_critical` flag has the following effects:
/// - `false`: the dock icon will only bounce once.
/// - `true`: the dock icon will bounce until the application is focused.
fn request_user_attention(&self, is_critical: bool);
/// Returns whether or not the window is in simple fullscreen mode.
fn get_simple_fullscreen(&self) -> bool;
/// Toggles a fullscreen mode that doesn't require a new macOS space.
/// Returns a boolean indicating whether the transition was successful (this
/// won't work if the window was already in the native fullscreen).
///
/// This is how fullscreen used to work on macOS in versions before Lion.
/// And allows the user to have a fullscreen window without using another
/// space or taking control over the entire monitor.
fn set_simple_fullscreen(&self, fullscreen: bool) -> bool;
}
impl WindowExtMacOS for Window {
#[inline]
fn get_nswindow(&self) -> *mut c_void {
self.window.get_nswindow()
}
#[inline]
fn get_nsview(&self) -> *mut c_void {
self.window.get_nsview()
}
#[inline]
fn request_user_attention(&self, is_critical: bool) {
self.window.request_user_attention(is_critical)
}
#[inline]
fn get_simple_fullscreen(&self) -> bool {
self.window.get_simple_fullscreen()
}
#[inline]
fn set_simple_fullscreen(&self, fullscreen: bool) -> bool {
self.window.set_simple_fullscreen(fullscreen)
}
}
/// Corresponds to `NSApplicationActivationPolicy`.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ActivationPolicy {
/// Corresponds to `NSApplicationActivationPolicyRegular`.
Regular,
/// Corresponds to `NSApplicationActivationPolicyAccessory`.
Accessory,
/// Corresponds to `NSApplicationActivationPolicyProhibited`.
Prohibited,
}
impl Default for ActivationPolicy {
fn default() -> Self {
ActivationPolicy::Regular
}
}
/// Additional methods on `WindowBuilder` that are specific to MacOS.
///
/// **Note:** Properties dealing with the titlebar will be overwritten by the `with_decorations` method
/// on the base `WindowBuilder`:
///
/// - `with_titlebar_transparent`
/// - `with_title_hidden`
/// - `with_titlebar_hidden`
/// - `with_titlebar_buttons_hidden`
/// - `with_fullsize_content_view`
pub trait WindowBuilderExtMacOS {
/// Sets the activation policy for the window being built.
fn with_activation_policy(self, activation_policy: ActivationPolicy) -> WindowBuilder;
/// Enables click-and-drag behavior for the entire window, not just the titlebar.
fn with_movable_by_window_background(self, movable_by_window_background: bool) -> WindowBuilder;
/// Makes the titlebar transparent and allows the content to appear behind it.
fn with_titlebar_transparent(self, titlebar_transparent: bool) -> WindowBuilder;
/// Hides the window title.
fn with_title_hidden(self, title_hidden: bool) -> WindowBuilder;
/// Hides the window titlebar.
fn with_titlebar_hidden(self, titlebar_hidden: bool) -> WindowBuilder;
/// Hides the window titlebar buttons.
fn with_titlebar_buttons_hidden(self, titlebar_buttons_hidden: bool) -> WindowBuilder;
/// Makes the window content appear behind the titlebar.
fn with_fullsize_content_view(self, fullsize_content_view: bool) -> WindowBuilder;
/// Build window with `resizeIncrements` property. Values must not be 0.
fn with_resize_increments(self, increments: LogicalSize) -> WindowBuilder;
}
impl WindowBuilderExtMacOS for WindowBuilder {
#[inline]
fn with_activation_policy(mut self, activation_policy: ActivationPolicy) -> WindowBuilder {
self.platform_specific.activation_policy = activation_policy;
self
}
#[inline]
fn with_movable_by_window_background(mut self, movable_by_window_background: bool) -> WindowBuilder {
self.platform_specific.movable_by_window_background = movable_by_window_background;
self
}
#[inline]
fn with_titlebar_transparent(mut self, titlebar_transparent: bool) -> WindowBuilder {
self.platform_specific.titlebar_transparent = titlebar_transparent;
self
}
#[inline]
fn with_titlebar_hidden(mut self, titlebar_hidden: bool) -> WindowBuilder {
self.platform_specific.titlebar_hidden = titlebar_hidden;
self
}
#[inline]
fn with_titlebar_buttons_hidden(mut self, titlebar_buttons_hidden: bool) -> WindowBuilder {
self.platform_specific.titlebar_buttons_hidden = titlebar_buttons_hidden;
self
}
#[inline]
fn with_title_hidden(mut self, title_hidden: bool) -> WindowBuilder {
self.platform_specific.title_hidden = title_hidden;
self
}
#[inline]
fn with_fullsize_content_view(mut self, fullsize_content_view: bool) -> WindowBuilder {
self.platform_specific.fullsize_content_view = fullsize_content_view;
self
}
#[inline]
fn with_resize_increments(mut self, increments: LogicalSize) -> WindowBuilder {
self.platform_specific.resize_increments = Some(increments.into());
self
}
}
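// Usage sketch (illustrative; assumes this extension trait is in scope):
//
//     let builder = WindowBuilder::new()
//         .with_movable_by_window_background(true)
//         .with_titlebar_transparent(true)
//         .with_fullsize_content_view(true);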
/// Additional methods on `MonitorHandle` that are specific to MacOS.
pub trait MonitorHandleExtMacOS {
/// Returns the identifier of the monitor for Cocoa.
fn native_id(&self) -> u32;
/// Returns a pointer to the NSScreen representing this monitor.
fn get_nsscreen(&self) -> Option<*mut c_void>;
}
impl MonitorHandleExtMacOS for MonitorHandle {
#[inline]
fn native_id(&self) -> u32 {
self.inner.get_native_identifier()
}
fn get_nsscreen(&self) -> Option<*mut c_void> {
self.inner.get_nsscreen().map(|s| s as *mut c_void)
}
}
| 35.967213 | 105 | 0.704193 |
33da059a1fc05bc8ce10434a700b9e7079370172 | 15,029 | use crate::{
account::{self, Identifier},
rewards::Ratio,
stake::Stake,
utxo,
};
use chain_addr::{Address, Kind};
use imhamt::Hamt;
use std::{collections::hash_map::DefaultHasher, fmt, num::NonZeroU64};
#[derive(Default, Clone, Eq, PartialEq)]
pub struct StakeControl {
assigned: Stake,
unassigned: Stake,
control: Hamt<DefaultHasher, Identifier, Stake>,
}
impl StakeControl {
pub fn new() -> Self {
Self::default()
}
fn update_accounts(&self, accounts: &account::Ledger) -> Self {
accounts
.iter()
.fold(self.clone(), |sc, (identifier, account)| {
sc.add_to(identifier.clone(), Stake::from_value(account.value()))
})
}
fn update_utxos(&self, utxos: &utxo::Ledger<Address>) -> Self {
utxos.values().fold(self.clone(), |sc, output| {
let stake = Stake::from_value(output.value);
// We're only interested in "group" addresses
// (i.e. containing a spending key and a stake key).
match output.address.kind() {
Kind::Account(_) | Kind::Multisig(_) => {
// single or multisig account are not present in utxos
panic!("internal error: accounts in utxo")
}
Kind::Script(_) => {
// scripts are not present in utxo
panic!("internal error: script in utxo")
}
Kind::Group(_spending_key, account_key) => {
let identifier = account_key.clone().into();
sc.add_to(identifier, stake)
}
Kind::Single(_) => sc.add_unassigned(stake),
}
})
}
pub fn new_with(accounts: &account::Ledger, utxos: &utxo::Ledger<Address>) -> Self {
Self::new().update_accounts(accounts).update_utxos(utxos)
}
pub fn total(&self) -> Stake {
self.assigned + self.unassigned
}
pub fn assigned(&self) -> Stake {
self.assigned
}
pub fn unassigned(&self) -> Stake {
self.unassigned
}
/// get the total stake controlled by the given account
pub fn by(&self, identifier: &Identifier) -> Option<Stake> {
self.control.lookup(identifier).copied()
}
/// get the ratio controlled by the given account
///
/// the ratio is based on the total assigned stake, stake that is
/// not controlled (that is in UTxO without account keys) are not
/// part of the equation.
///
pub fn ratio_by(&self, identifier: &Identifier) -> Ratio {
if let Some(stake) = self.by(identifier) {
Ratio {
numerator: stake.0,
denominator: unsafe {
                    // the assigned cannot be `0` because there must
                    // be at least the account's stake, which is
                    // non-zero
NonZeroU64::new_unchecked(self.assigned().0)
},
}
} else {
Ratio::zero()
}
}
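    // Illustrative sketch (hypothetical figures): if this account controls
    // Stake(50) and the total assigned stake is Stake(200), the result is
    // the ratio 50/200; unassigned (single-key UTxO) stake never enters it:
    //
    //     let r = stake_control.ratio_by(&identifier);
    //     assert_eq!(r.numerator, 50);
    //     assert_eq!(r.denominator.get(), 200);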
#[must_use = "internal state is not modified"]
pub fn add_unassigned(&self, stake: Stake) -> Self {
Self {
assigned: self.assigned,
unassigned: self.unassigned.wrapping_add(stake),
control: self.control.clone(),
}
}
#[must_use = "internal state is not modified"]
pub fn remove_unassigned(&self, stake: Stake) -> Self {
Self {
assigned: self.assigned,
unassigned: self.unassigned.wrapping_sub(stake),
control: self.control.clone(),
}
}
/// add the given amount of stake to the given identifier
///
/// also update the total stake
#[must_use = "internal state is not modified"]
pub fn add_to(&self, identifier: Identifier, stake: Stake) -> Self {
let control = self
.control
.insert_or_update_simple(identifier, stake, |v: &Stake| v.checked_add(stake));
Self {
control,
assigned: self.assigned.wrapping_add(stake),
unassigned: self.unassigned,
}
}
/// add the given amount of stake to the given identifier
///
/// also update the total stake
#[must_use = "internal state is not modified"]
pub fn remove_from(&self, identifier: Identifier, stake: Stake) -> Self {
use std::convert::Infallible;
let control = self.control.update(&identifier, |v| {
Result::<Option<Stake>, Infallible>::Ok(v.checked_sub(stake))
});
let control = match control {
Ok(updated) => updated,
Err(reason) => {
debug_assert!(
false,
"Removing {:?} from an account ({}) that does not exist: {:?}",
stake, identifier, reason,
);
self.control.clone()
}
};
Self {
control,
assigned: self.assigned.wrapping_sub(stake),
unassigned: self.unassigned,
}
}
}
impl fmt::Debug for StakeControl {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"unassigned: {}, assigned: {}, control: {:?}",
self.unassigned,
self.assigned,
self.control
.iter()
.map(|(id, account)| (id.clone(), *account))
.collect::<Vec<(Identifier, Stake)>>()
)
}
}
#[cfg(test)]
mod tests {
use super::StakeControl;
use crate::{
account::{self, Identifier},
rewards::Ratio,
stake::Stake,
testing::{utxo::ArbitaryLedgerUtxo, TestGen},
};
use quickcheck_macros::quickcheck;
use std::num::NonZeroU64;
fn create_stake_control_from(
assigned: &[(Identifier, Stake)],
unassigned: Stake,
) -> StakeControl {
let stake_control: StakeControl = assigned
.iter()
.fold(StakeControl::new(), |sc, (identifier, stake)| {
sc.add_to(identifier.clone(), *stake)
});
stake_control.add_unassigned(unassigned)
}
#[test]
pub fn empty_stake_control() {
let random_identifier = TestGen::identifier();
let stake_control = create_stake_control_from(&[], Stake::zero());
assert_eq!(stake_control.total(), Stake::zero());
assert_eq!(stake_control.unassigned(), Stake::zero());
assert_eq!(stake_control.assigned(), Stake::zero());
assert_eq!(stake_control.by(&random_identifier), None);
let expected_ratio = Ratio {
numerator: 0,
denominator: NonZeroU64::new(1).unwrap(),
};
assert_eq!(stake_control.ratio_by(&random_identifier), expected_ratio);
}
#[test]
pub fn stake_control_only_assigned() {
let identifier = TestGen::identifier();
let initial_stake = Stake(100);
let stake_control =
create_stake_control_from(&[(identifier.clone(), initial_stake)], Stake::zero());
assert_eq!(stake_control.total(), initial_stake);
assert_eq!(stake_control.unassigned(), Stake::zero());
assert_eq!(stake_control.assigned(), initial_stake);
assert_eq!(stake_control.by(&identifier).unwrap(), initial_stake);
let expected_ratio = Ratio {
numerator: 100,
denominator: NonZeroU64::new(100).unwrap(),
};
assert_eq!(stake_control.ratio_by(&identifier), expected_ratio);
}
#[test]
pub fn stake_control_only_unassigned() {
let identifier = TestGen::identifier();
let initial_stake = Stake(100);
let stake_control = create_stake_control_from(&[], initial_stake);
assert_eq!(stake_control.total(), initial_stake);
assert_eq!(stake_control.unassigned(), initial_stake);
assert_eq!(stake_control.assigned(), Stake::zero());
assert_eq!(stake_control.by(&identifier), None);
let expected_ratio = Ratio {
numerator: 0,
denominator: NonZeroU64::new(1).unwrap(),
};
assert_eq!(stake_control.ratio_by(&identifier), expected_ratio);
}
#[test]
pub fn stake_control_unassigned_and_assigned() {
let identifier = TestGen::identifier();
let stake_to_add = Stake(100);
let stake_control =
create_stake_control_from(&[(identifier.clone(), stake_to_add)], stake_to_add);
assert_eq!(stake_control.total(), Stake(200));
assert_eq!(stake_control.unassigned(), stake_to_add);
assert_eq!(stake_control.assigned(), stake_to_add);
assert_eq!(stake_control.by(&identifier), Some(stake_to_add));
let expected_ratio = Ratio {
numerator: 100,
denominator: NonZeroU64::new(100).unwrap(),
};
assert_eq!(stake_control.ratio_by(&identifier), expected_ratio);
}
#[test]
pub fn stake_control_remove_part_of_assigned() {
let identifier = TestGen::identifier();
let stake_to_add = Stake(100);
let stake_to_sub = Stake(50);
let mut stake_control =
create_stake_control_from(&[(identifier.clone(), stake_to_add)], stake_to_add);
stake_control = stake_control.remove_from(identifier.clone(), stake_to_sub);
assert_eq!(stake_control.total(), Stake(150));
assert_eq!(stake_control.unassigned(), Stake(100));
assert_eq!(stake_control.assigned(), Stake(50));
assert_eq!(stake_control.by(&identifier), Some(Stake(50)));
let expected_ratio = Ratio {
numerator: 50,
denominator: NonZeroU64::new(50).unwrap(),
};
assert_eq!(stake_control.ratio_by(&identifier), expected_ratio);
}
#[test]
#[cfg(debug_assertions)]
#[should_panic(expected = "KeyNotFound")]
pub fn stake_control_remove_non_existing_assigned_debug() {
let non_existing_identifier = TestGen::identifier();
let existing_identifier = TestGen::identifier();
let stake_to_add = Stake(100);
let stake_control =
create_stake_control_from(&[(existing_identifier, stake_to_add)], stake_to_add);
assert_eq!(stake_control.total(), Stake(200));
let _ = stake_control.remove_from(non_existing_identifier, stake_to_add);
}
#[test]
#[cfg(not(debug_assertions))]
pub fn stake_control_remove_non_existing_assigned_release() {
let non_existing_identifier = TestGen::identifier();
let existing_identifier = TestGen::identifier();
let stake_to_add = Stake(100);
let stake_control =
create_stake_control_from(&[(existing_identifier, stake_to_add.clone())], stake_to_add);
assert_eq!(stake_control.total(), Stake(200));
let _ = stake_control.remove_from(non_existing_identifier, stake_to_add);
}
#[test]
pub fn stake_control_remove_all_assigned() {
let identifier = TestGen::identifier();
let stake_to_add = Stake(100);
let mut stake_control =
create_stake_control_from(&[(identifier.clone(), stake_to_add)], stake_to_add);
assert_eq!(stake_control.total(), Stake(200));
stake_control = stake_control.remove_from(identifier.clone(), stake_to_add);
assert_eq!(stake_control.total(), Stake(100));
assert_eq!(stake_control.unassigned(), Stake(100));
assert_eq!(stake_control.assigned(), Stake::zero());
assert_eq!(stake_control.by(&identifier), Some(Stake::zero()));
unsafe {
let expected_ratio = Ratio {
numerator: 0,
denominator: NonZeroU64::new_unchecked(0),
};
assert_eq!(stake_control.ratio_by(&identifier), expected_ratio);
}
}
#[test]
pub fn stake_control_remove_unassigned() {
let identifier = TestGen::identifier();
let stake_to_add = Stake(100);
let stake_control =
create_stake_control_from(&[(identifier.clone(), stake_to_add)], stake_to_add);
assert_eq!(stake_control.total(), Stake(200));
assert_eq!(stake_control.unassigned(), stake_to_add);
assert_eq!(stake_control.assigned(), stake_to_add);
assert_eq!(stake_control.by(&identifier), Some(stake_to_add));
let expected_ratio = Ratio {
numerator: 100,
denominator: NonZeroU64::new(100).unwrap(),
};
assert_eq!(stake_control.ratio_by(&identifier), expected_ratio);
}
#[test]
pub fn stake_control_remove_all() {
let identifier = TestGen::identifier();
let stake_to_add = Stake(100);
let mut stake_control =
create_stake_control_from(&[(identifier.clone(), stake_to_add)], stake_to_add);
stake_control = stake_control.remove_from(identifier.clone(), stake_to_add);
stake_control = stake_control.remove_unassigned(stake_to_add);
assert_eq!(stake_control.total(), Stake::zero());
assert_eq!(stake_control.unassigned(), Stake::zero());
assert_eq!(stake_control.assigned(), Stake::zero());
assert_eq!(stake_control.by(&identifier), Some(Stake::zero()));
}
#[test]
pub fn stake_control_account_ratio() {
let first_identifier = TestGen::identifier();
let second_identifier = TestGen::identifier();
let stake_to_add = Stake(100);
let stake_control = create_stake_control_from(
&[
(first_identifier.clone(), stake_to_add),
(second_identifier.clone(), stake_to_add),
],
stake_to_add,
);
assert_eq!(stake_control.by(&first_identifier), Some(stake_to_add));
assert_eq!(stake_control.by(&second_identifier), Some(stake_to_add));
assert_eq!(stake_control.by(&first_identifier), Some(stake_to_add));
assert_eq!(stake_control.by(&second_identifier), Some(stake_to_add));
let expected_ratio = Ratio {
numerator: 100,
denominator: NonZeroU64::new(200).unwrap(),
};
assert_eq!(stake_control.ratio_by(&first_identifier), expected_ratio);
let expected_ratio = Ratio {
numerator: 100,
denominator: NonZeroU64::new(200).unwrap(),
};
assert_eq!(stake_control.ratio_by(&second_identifier), expected_ratio);
}
#[quickcheck]
pub fn stake_control_from_ledger(accounts: account::Ledger, utxos: ArbitaryLedgerUtxo) {
let stake_control = StakeControl::new_with(&accounts, &utxos.0);
        // verify sum
let accounts = accounts.get_total_value().unwrap();
let utxo_or_group = utxos.0.values().map(|x| x.value).sum();
let expected_sum = accounts
.checked_add(utxo_or_group)
.expect("cannot calculate expected total");
assert_eq!(stake_control.total(), expected_sum.into());
}
}
| 35.114486 | 100 | 0.596513 |
9068b6d8f3570a5c875053e1144a69196c1915a1 | 2,989 | //! Macros used on ouch.
use crate::accessible::is_running_in_accessible_mode;
/// Macro that prints \[INFO\] messages, wraps [`println`].
///
/// There are essentially two different versions of the `info!()` macro:
/// - `info!(accessible, ...)` should only be used for short, important
/// information which is expected to be useful for e.g. blind users whose
/// text-to-speech systems read out every output line, which is why we
/// should reduce nonessential output to a minimum when running in
/// ACCESSIBLE mode
/// - `info!(inaccessible, ...)` can be used more carelessly / for less
/// important information. A seeing user can easily skim through more lines
/// of output, so e.g. reporting every single processed file can be helpful,
/// while it would generate long and hard to navigate text for blind people
/// who have to have each line of output read to them aloud, without the
/// ability to skip some lines deemed not important like a seeing person would.
///
/// By default `info` outputs to Stdout, if you want to specify the output you can use
/// `@display_handle` modifier
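///
/// A minimal usage sketch (hypothetical call sites):
///
/// ```ignore
/// info!(accessible, "extracted {} files", count);
/// info!(inaccessible, "extracting file {:?}", path);
/// info!(@std::io::stderr(), accessible, "done");
/// ```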
#[macro_export]
macro_rules! info {
// Accessible (short/important) info message.
// Show info message even in ACCESSIBLE mode
(accessible, $($arg:tt)*) => {
info!(@::std::io::stdout(), accessible, $($arg)*);
};
(@$display_handle: expr, accessible, $($arg:tt)*) => {
let display_handle = &mut $display_handle;
// if in ACCESSIBLE mode, suppress the "[INFO]" and just print the message
if !$crate::accessible::is_running_in_accessible_mode() {
$crate::macros::_info_helper(display_handle);
}
writeln!(display_handle, $($arg)*).unwrap();
};
    // Inaccessible (long/not important) info message.
// Print info message if ACCESSIBLE is not turned on
(inaccessible, $($arg:tt)*) => {
info!(@::std::io::stdout(), inaccessible, $($arg)*);
};
(@$display_handle: expr, inaccessible, $($arg:tt)*) => {
if !$crate::accessible::is_running_in_accessible_mode() {
let display_handle = &mut $display_handle;
$crate::macros::_info_helper(display_handle);
writeln!(display_handle, $($arg)*).unwrap();
}
};
}
/// Helper to display "\[INFO\]", colored yellow
pub fn _info_helper(handle: &mut impl std::io::Write) {
use crate::utils::colors::{RESET, YELLOW};
write!(handle, "{}[INFO]{} ", *YELLOW, *RESET).unwrap();
}
/// Macro that prints \[WARNING\] messages, wraps [`eprintln`].
#[macro_export]
macro_rules! warning {
($($arg:tt)*) => {
$crate::macros::_warning_helper();
eprintln!($($arg)*);
};
}
/// Helper to display "\[WARNING\]", colored orange
pub fn _warning_helper() {
use crate::utils::colors::{ORANGE, RESET};
if is_running_in_accessible_mode() {
eprint!("{}Warning:{} ", *ORANGE, *RESET);
} else {
eprint!("{}[WARNING]{} ", *ORANGE, *RESET);
}
}
| 38.320513 | 86 | 0.635999 |
9154bc11fd64c9e6b972ce81414f82213e848e95 | 2,302 | use bellperson::{ConstraintSystem, SynthesisError};
use merkletree::hash::{Algorithm as LightAlgorithm, Hashable as LightHashable};
use merkletree::merkle::Element;
use paired::bls12_381::{Fr, FrRepr};
use rand::Rand;
use sapling_crypto::circuit::{boolean, num};
use sapling_crypto::jubjub::JubjubEngine;
use serde::de::DeserializeOwned;
use serde::ser::Serialize;
use crate::error::Result;
pub trait Domain:
Ord
+ Copy
+ Clone
+ AsRef<[u8]>
+ Default
+ ::std::fmt::Debug
+ Eq
+ Send
+ Sync
+ From<Fr>
+ From<FrRepr>
+ Into<Fr>
+ Rand
+ Serialize
+ DeserializeOwned
+ Element
{
fn serialize(&self) -> Vec<u8>;
fn into_bytes(&self) -> Vec<u8>;
fn try_from_bytes(raw: &[u8]) -> Result<Self>;
/// Write itself into the given slice, LittleEndian bytes.
fn write_bytes(&self, _: &mut [u8]) -> Result<()>;
}
pub trait HashFunction<T: Domain>:
Clone + ::std::fmt::Debug + Eq + Send + Sync + LightAlgorithm<T>
{
fn hash(data: &[u8]) -> T;
fn hash_leaf(data: &LightHashable<Self>) -> T {
let mut a = Self::default();
data.hash(&mut a);
let item_hash = a.hash();
a.leaf(item_hash)
}
fn hash_single_node(data: &LightHashable<Self>) -> T {
let mut a = Self::default();
data.hash(&mut a);
a.hash()
}
fn hash_leaf_circuit<E: JubjubEngine, CS: ConstraintSystem<E>>(
cs: CS,
left: &[boolean::Boolean],
right: &[boolean::Boolean],
height: usize,
params: &E::Params,
) -> std::result::Result<num::AllocatedNum<E>, SynthesisError>;
fn hash_circuit<E: JubjubEngine, CS: ConstraintSystem<E>>(
cs: CS,
bits: &[boolean::Boolean],
params: &E::Params,
) -> std::result::Result<num::AllocatedNum<E>, SynthesisError>;
}
pub trait Hasher: Clone + ::std::fmt::Debug + Eq + Default + Send + Sync {
type Domain: Domain + LightHashable<Self::Function>;
type Function: HashFunction<Self::Domain>;
fn kdf(data: &[u8], m: usize) -> Self::Domain;
fn sloth_encode(key: &Self::Domain, ciphertext: &Self::Domain, rounds: usize) -> Self::Domain;
fn sloth_decode(key: &Self::Domain, ciphertext: &Self::Domain, rounds: usize) -> Self::Domain;
fn name() -> String;
}
| 28.419753 | 98 | 0.609904 |
48964af5eeb4f8865cc1410243cb533aa33eddce | 2,472 | extern crate nix;
use job_manager::*;
use self::nix::unistd::{getpid, tcsetpgrp};
use self::nix::sys::signal::kill;
use self::nix::sys::signal;
pub fn bg(args: String) -> i8 {
let mut err_code = 0;
let size = args.split_whitespace().count();
match size {
0 => {
if JOB.lock().unwrap().get_size() == 0 {
println!("bg: No running jobs");
err_code = 1;
} else {
let a = JOB.lock().unwrap().pop_stopped();
match a {
Some((pid, name, State::Stopped)) => {
println!("Send job {} to background : {}", name, pid);
kill(-pid, signal::SIGCONT).expect("sigcont failed");
JOB.lock().unwrap().push(pid, name, State::Running);
tcsetpgrp(1, getpid()).expect("tcsetpgrp failed");
},
_ => {
println!("bg: No stopped jobs");
err_code = 1;
}
}
}
}
_ => {
err_code = 1;
println!("Too many arguments for bg command");
},
}
err_code
}
| 38.030769 | 83 | 0.37945 |
e5ce0b359c91b3e4689bec87868c23d4688ba47c | 4,708 | use ndarray::prelude::*;
use num_traits::AsPrimitive;
use crate::ops::prelude::*;
#[derive(Debug, Clone, new)]
pub struct Shape {
dt: DatumType,
}
impl Shape {
pub fn coerce_to<T>(shape: &[usize]) -> TractResult<SharedTensor>
where
T: Datum,
usize: AsPrimitive<T>,
{
let array = Array1::from_vec(shape.iter().map(|i| i.as_()).collect());
Ok(array.into())
}
}
impl Op for Shape {
fn name(&self) -> Cow<str> {
"Shape".into()
}
}
impl StatelessOp for Shape {
/// Evaluates the operation given the input tensors.
fn eval(&self, inputs: TVec<SharedTensor>) -> TractResult<TVec<SharedTensor>> {
let shape = inputs[0].shape();
Ok(tvec![dispatch_numbers!(Self::coerce_to(self.dt)(&shape))?])
}
}
impl InferenceRulesOp for Shape {
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
s: &mut Solver<'r>,
inputs: &'p SharedTensorsProxy,
outputs: &'p SharedTensorsProxy,
) -> InferenceResult {
s.equals(&inputs.len, 1)?;
s.equals(&outputs.len, 1)?;
s.equals(&outputs[0].rank, 1)?;
s.given(&inputs[0].rank, move |s, r| {
s.equals(&outputs[0].shape[0], r.to_dim())
})?;
s.given(&outputs[0].shape[0], move |s, r| {
if let Ok(d) = r.to_integer() {
s.equals(&inputs[0].rank, d)?;
}
Ok(())
})?;
s.given(&inputs[0].shape, move |s, shape| {
if shape.iter().any(|&d| d.to_integer().is_err()) {
s.equals(&outputs[0].datum_type, DatumType::TDim)?;
let array1: Array1<TDim> = Array1::from_iter(shape);
let tensor: SharedTensor = array1.into();
s.equals(&outputs[0].value, tensor)
} else if self.dt == DatumType::I64 {
s.equals(&outputs[0].datum_type, DatumType::I64)?;
let array1: Array1<i64> = Array1::from_vec(
shape
.iter()
.map(|&i| i.to_integer().unwrap() as i64)
.collect(),
);
let tensor: SharedTensor = array1.into();
s.equals(&outputs[0].value, tensor)
} else {
s.equals(&outputs[0].datum_type, DatumType::I32)?;
let array1: Array1<i32> = Array1::from_vec(
shape
.iter()
.map(|&i| i.to_integer().unwrap() as i32)
.collect(),
);
let tensor: SharedTensor = array1.into();
s.equals(&outputs[0].value, tensor)
}
})
}
}
/*
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn shape_inference_1() {
let input = TensorFact {
datum_type: typefact!(DatumType::F32),
shape: shapefact![1, _, _; ..],
value: valuefact!(_),
};
let output = TensorFact {
datum_type: typefact!(DatumType::TDim),
shape: shapefact![_],
value: valuefact!(_),
};
assert_forward!(Shape::new(DatumType::I32), input, output);
}
#[test]
fn shape_inference_2() {
let input = TensorFact {
datum_type: typefact!(DatumType::F32),
shape: shapefact![1, _, _],
value: valuefact!(_),
};
let output = TensorFact {
datum_type: typefact!(DatumType::TDim),
shape: shapefact![3],
value: valuefact!(_),
};
assert_forward!(Shape::new(DatumType::I32), input, output);
}
#[test]
fn shape_inference_3() {
let input = TensorFact {
datum_type: typefact!(DatumType::F32),
shape: shapefact![1, 2, 3],
value: valuefact!(_),
};
let output = TensorFact {
datum_type: typefact!(DatumType::TDim),
shape: shapefact![3],
value: valuefact!(Tensor::dims(&[3], &[1.to_dim(), 2.to_dim(), 3.to_dim()]).unwrap()),
};
assert_forward!(Shape::new(DatumType::I32), input, output);
}
#[test]
fn shape_inference_4() {
let input = TensorFact {
datum_type: typefact!(_),
shape: shapefact![1, 2, 3],
value: valuefact!(_),
};
let output = TensorFact {
datum_type: typefact!(DatumType::TDim),
shape: shapefact![3],
value: valuefact!(Tensor::dims(&[3], &[1.to_dim(), 2.to_dim(), 3.to_dim()]).unwrap()),
};
assert_backward!(Shape::new(DatumType::I32), input, output);
}
}
*/
| 29.425 | 98 | 0.493628 |
29b87d80be7268e8a62846372a93342d6695d78c | 1,041 | extern crate notify_rust;
use notify_rust::Notification;
use notify_rust::NotificationHint as Hint;
fn main() {
#[cfg(all(unix, not(target_os = "macos")))]
Notification::new()
.summary("click me")
.action("default", "default") // IDENTIFIER, LABEL
.action("clicked", "click here") // IDENTIFIER, LABEL
.hint(Hint::Resident(true))
.show()
.unwrap()
.wait_for_action({|action|
match action {
"default" => println!("so boring"),
"clicked" => println!("that was correct"),
// here "__closed" is a hardcoded keyword
"__closed" => println!("the notification was closed"),
_ => ()
}
});
#[cfg(target_os = "macos")]
Notification::new().summary("PLATFORM ERROR")
.subtitle("unsupported functionality")
.body("cannot wait for closing on macOS.")
.show()
.unwrap();
}
| 32.53125 | 70 | 0.505283 |
abd8a0d8de4ea18d131b18c64e2889bb6bc8c651 | 24,607 | use crate::{
attr::AttrStr,
error::Result,
import::import,
resolve::resolve_schema,
schema::{Schema, WithSchema},
};
use pyo3::ffi::*;
use std::{
borrow::Cow,
collections::HashMap,
fmt::{self, Debug},
ops::{Deref, DerefMut},
os::raw::c_char,
ptr::NonNull,
sync::atomic::{AtomicPtr, Ordering},
};
macro_rules! objnew {
($p:expr) => {
$crate::object::Object::new(unsafe { $p })
};
}
macro_rules! cast {
($p:expr) => {
unsafe { &mut $p as *mut _ as *mut PyObject }
};
}
macro_rules! ptr_cast {
($p:expr) => {
unsafe { $p as *mut _ as *mut PyObject }
};
}
macro_rules! is_type {
($object:expr, $name:ident) => {
import()
.ok()
.filter(|o| $object.is(o.$name.as_ptr()))
.is_some()
};
}
macro_rules! is_type_opt {
($object:expr, $name:ident) => {
import()
.ok()
.and_then(|o| o.$name.as_ref())
.filter(|o| $object.is(o.as_ptr()))
.is_some()
};
}
pub struct ObjectRef;
impl ObjectRef {
pub fn new<'a>(p: *mut PyObject) -> Result<&'a Self> {
match unsafe { (p as *mut ObjectRef).as_ref() } {
Some(p) => Ok(p),
None => Err(err!("failed to create an object")),
}
}
pub fn resolve<'a>(
&'a self,
attr: Option<HashMap<&str, &ObjectRef>>,
) -> Result<Cow<'a, Schema>> {
resolve_schema(self, attr)
}
pub fn resolved_object(&self) -> Result<WithSchema<'_>> {
let schema = self.get_type()?.resolve(None)?;
Ok(WithSchema::new(schema, self))
}
pub fn with_schema<'a>(&'a self, schema: &'a Schema) -> WithSchema<'a> {
WithSchema::new(schema.borrowed(), self)
}
pub fn owned(&self) -> Object {
Object::new_clone(self.as_ptr()).unwrap()
}
pub fn get_type(&self) -> Result<&ObjectRef> {
Self::new(ptr_cast!((*self.as_ptr()).ob_type))
}
pub fn set_capsule<'a, T>(&self, s: &AttrStr, item: T) -> Result<&'a T> {
extern "C" fn destructor(p: *mut PyObject) {
let p = unsafe { PyCapsule_GetPointer(p, std::ptr::null_mut()) };
let _b = unsafe { Box::from_raw(p) };
}
let p = Box::new(item);
let p = Box::leak(p);
let obj = Object::new(unsafe {
PyCapsule_New(
p as *mut _ as *mut std::ffi::c_void,
std::ptr::null_mut(),
Some(destructor),
)
})?;
if unsafe { PyObject_SetAttrString(self.as_ptr(), s.as_ptr(), obj.as_ptr()) != 0 } {
bail!("cannot set attribute `{}`", s)
} else {
Ok(p)
}
}
pub fn get_capsule<'a, T>(&self, s: &AttrStr) -> Option<&'a T> {
if !self.has_attr(s) {
return None;
}
let obj = self.get_attr(s).ok()?;
let p = unsafe { PyCapsule_GetPointer(obj.as_ptr(), std::ptr::null_mut()) };
if p.is_null() {
None
} else {
Some(unsafe { &*(p as *mut T) })
}
}
pub fn as_bool(&self) -> Result<bool> {
if self.is(unsafe { Py_True() }) {
Ok(true)
} else if self.is(unsafe { Py_False() }) {
Ok(false)
} else {
bail_type_err!("expected `bool` got `{}`: {:?}", self.typename(), self)
}
}
pub fn as_i64(&self) -> Result<i64> {
let p = unsafe { PyLong_AsLongLong(self.as_ptr()) };
if unsafe { !PyErr_Occurred().is_null() } {
bail_type_err!("expected `int` got `{}`: {:?}", self.typename(), self)
} else {
Ok(p)
}
}
pub fn as_u64(&self) -> Result<u64> {
let p = unsafe { PyLong_AsLongLong(self.as_ptr()) };
if unsafe { !PyErr_Occurred().is_null() } {
bail_type_err!("expected `int` got `{}`: {:?}", self.typename(), self)
} else {
Ok(p as u64)
}
}
pub fn as_f64(&self) -> Result<f64> {
let p = unsafe { PyFloat_AsDouble(self.as_ptr()) };
if unsafe { !PyErr_Occurred().is_null() } {
bail_type_err!("expected `float` got `{}`: {:?}", self.typename(), self)
} else {
Ok(p)
}
}
pub fn as_str(&self) -> Result<&str> {
let mut len: Py_ssize_t = 0;
let p = unsafe { PyUnicode_AsUTF8AndSize(self.as_ptr(), &mut len) };
if p.is_null() {
bail_type_err!("expected `str` got `{}`: {:?}", self.typename(), self)
} else {
unsafe {
let slice = std::slice::from_raw_parts(p as *const u8, len as usize);
Ok(std::str::from_utf8(slice).unwrap())
}
}
}
pub fn as_bytes(&self) -> Result<&[u8]> {
let mut len: Py_ssize_t = 0;
let mut buf: *mut c_char = std::ptr::null_mut();
let p = unsafe { PyBytes_AsStringAndSize(self.as_ptr(), &mut buf, &mut len) };
if p == -1 {
bail_type_err!("expected `bytes` got `{}`: {:?}", self.typename(), self)
} else {
unsafe {
let slice = std::slice::from_raw_parts(buf as *const u8, len as usize);
Ok(slice)
}
}
}
pub fn as_bytearray(&self) -> Result<&[u8]> {
let p = unsafe { PyByteArray_AsString(self.as_ptr()) };
let len = unsafe { PyByteArray_Size(self.as_ptr()) };
if p.is_null() {
bail_type_err!("expected `bytearray` got `{}`: {:?}", self.typename(), self)
} else {
unsafe {
let slice = std::slice::from_raw_parts(p as *const u8, len as usize);
Ok(slice)
}
}
}
pub fn as_list(&self) -> ListRef<'_> {
ListRef::new(self)
}
pub fn as_set(&self) -> SetRef<'_> {
SetRef::new(self)
}
pub fn as_tuple(&self) -> TupleRef<'_> {
TupleRef::new(self)
}
pub fn to_str(&self) -> Result<Object> {
Object::new(unsafe { PyObject_Str(self.as_ptr()) })
}
pub fn is(&self, p: *mut PyObject) -> bool {
self.as_ptr() == p
}
pub fn is_none(&self) -> bool {
self.is(unsafe { Py_None() })
}
pub fn is_type(&self) -> bool {
unsafe { (*self.as_ptr()).ob_type == &mut PyType_Type }
}
pub fn is_none_type(&self) -> bool {
self.is(ptr_cast!((*Py_None()).ob_type))
}
pub fn is_bool(&self) -> bool {
self.is(cast!(PyBool_Type))
}
pub fn is_str(&self) -> bool {
self.is(cast!(PyUnicode_Type))
}
pub fn is_int(&self) -> bool {
self.is(cast!(PyLong_Type))
}
pub fn is_float(&self) -> bool {
self.is(cast!(PyFloat_Type))
}
pub fn is_bytes(&self) -> bool {
self.is(cast!(PyBytes_Type))
}
pub fn is_bytearray(&self) -> bool {
self.is(cast!(PyByteArray_Type))
}
pub fn is_dict(&self) -> bool {
self.is(cast!(PyDict_Type)) || is_type!(self, dict)
}
pub fn is_tuple(&self) -> bool {
self.is(cast!(PyTuple_Type)) || is_type!(self, tuple)
}
pub fn is_set(&self) -> bool {
self.is(cast!(PySet_Type)) || is_type!(self, set)
}
pub fn is_list(&self) -> bool {
self.is(cast!(PyList_Type)) || is_type!(self, list)
}
pub fn is_frozen_set(&self) -> bool {
self.is(cast!(PyFrozenSet_Type)) || is_type!(self, frozenset)
}
pub fn is_any(&self) -> bool {
// `Any`, bare `Optional` and bare `Union` can be treated as Any.
is_type!(self, any) || is_type!(self, optional) || is_type!(self, union)
}
pub fn is_generic(&self) -> bool {
self.get_type()
.ok()
.filter(|o| {
is_type!(o, generic_alias)
|| is_type_opt!(o, base_generic_alias)
|| is_type_opt!(o, union_generic_alias)
|| is_type_opt!(o, special_generic_alias)
})
.is_some()
}
pub fn is_builtin_generic(&self) -> bool {
self.get_type()
.ok()
.filter(|o| is_type_opt!(o, types_generic_alias))
.is_some()
}
pub fn is_enum(&self) -> bool {
self.get_type()
.ok()
.filter(|o| is_type!(o, enum_meta))
.is_some()
}
pub fn is_datetime(&self) -> bool {
is_type!(self, datetime)
}
pub fn is_date(&self) -> bool {
is_type!(self, date)
}
pub fn is_time(&self) -> bool {
is_type!(self, time)
}
pub fn is_decimal(&self) -> bool {
is_type!(self, decimal)
}
pub fn is_uuid(&self) -> bool {
is_type!(self, uuid)
}
pub fn name(&self) -> &str {
unsafe {
if self.is_type() {
let p = (*(self.as_ptr() as *mut PyTypeObject)).tp_name;
std::ffi::CStr::from_ptr(p)
.to_str()
.unwrap_or("__unknown__")
} else {
"__unknown__"
}
}
}
pub fn typename(&self) -> &str {
self.get_type().map(|t| t.name()).unwrap_or("__unknown__")
}
pub fn as_ptr(&self) -> *mut PyObject {
&*self as *const Self as *mut Self as *mut PyObject
}
pub fn has_attr(&self, s: &AttrStr) -> bool {
unsafe { PyObject_HasAttrString(self.as_ptr(), s.as_ptr()) != 0 }
}
pub fn get_attr(&self, s: &AttrStr) -> Result<Object> {
objnew!(PyObject_GetAttrString(self.as_ptr(), s.as_ptr()))
}
pub fn get_iter(&self) -> Result<ObjectIter> {
Ok(ObjectIter(objnew!(PyObject_GetIter(self.as_ptr()))?))
}
pub fn get_tuple_iter(&self) -> Result<TupleIter<'_>> {
TupleIter::new(self)
}
pub fn get_dict_iter(&self) -> Result<DictIter<'_>> {
DictIter::new(self)
}
pub fn get(&self, s: &str) -> Result<Object> {
let key = Object::new_str(s)?;
objnew!(PyObject_GetItem(self.as_ptr(), key.as_ptr()))
}
pub fn call(&self, args: Vec<Object>) -> Result<Object> {
let mut tuple = Object::build_tuple(args.len())?;
for (i, arg) in args.into_iter().enumerate() {
tuple.set(i, arg);
}
objnew!(PyObject_CallObject(self.as_ptr(), tuple.build().as_ptr()))
}
pub fn call1(&self, obj: Object) -> Result<Object> {
self.call(vec![obj])
}
pub fn call0(&self) -> Result<Object> {
self.call(vec![])
}
pub fn isoformat(&self) -> Result<Object> {
self.get_attr(&ATTR_ISOFORMAT)?.call0()
}
}
impl Debug for ObjectRef {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.to_str()
.ok()
.and_then(|o| o.as_str().ok().map(|s| write!(f, "{}", s)))
.unwrap_or_else(|| write!(f, "<unknown>"))
}
}
#[derive(Debug)]
pub struct ObjectIter(Object);
impl Iterator for ObjectIter {
type Item = Result<Object>;
fn next(&mut self) -> Option<Self::Item> {
let p = unsafe { PyIter_Next(self.0.as_ptr()) };
if p.is_null() {
if unsafe { !PyErr_Occurred().is_null() } {
Some(Err(err!("an error occurred during iteration")))
} else {
None
}
} else {
Some(Ok(Object::new(p).unwrap()))
}
}
}
#[derive(Debug)]
pub struct TupleIter<'a> {
p: TupleRef<'a>,
len: usize,
index: usize,
}
impl<'a> TupleIter<'a> {
fn new(p: &'a ObjectRef) -> Result<Self> {
let len = unsafe { PyTuple_Size(p.as_ptr()) as usize };
if unsafe { !PyErr_Occurred().is_null() } {
bail!("cannot get the size of tuple")
}
Ok(Self {
p: TupleRef::new(p),
len,
index: 0,
})
}
pub fn is_empty(&self) -> bool {
self.len == 0
}
pub fn len(&self) -> usize {
self.len
}
}
impl<'a> Iterator for TupleIter<'a> {
type Item = &'a ObjectRef;
fn next(&mut self) -> Option<Self::Item> {
if self.index >= self.len {
None
} else {
let item = self.p.get(self.index).ok();
self.index += 1;
item
}
}
}
#[derive(Debug, Clone)]
pub struct DictIter<'a> {
p: &'a ObjectRef,
len: usize,
index: Py_ssize_t,
}
impl<'a> DictIter<'a> {
fn new(p: &'a ObjectRef) -> Result<Self> {
let len = unsafe { PyDict_Size(p.as_ptr()) as usize };
if unsafe { !PyErr_Occurred().is_null() } {
bail!("cannot get the size of dict")
}
Ok(Self { p, len, index: 0 })
}
pub fn is_empty(&self) -> bool {
self.len == 0
}
pub fn len(&self) -> usize {
self.len
}
}
impl<'a> Iterator for DictIter<'a> {
type Item = (&'a ObjectRef, &'a ObjectRef);
fn next(&mut self) -> Option<Self::Item> {
let mut k = std::ptr::null_mut();
let mut v = std::ptr::null_mut();
let res = unsafe { PyDict_Next(self.p.as_ptr(), &mut self.index, &mut k, &mut v) };
if res == 0 {
None
} else {
let k = ObjectRef::new(k).ok()?;
let v = ObjectRef::new(v).ok()?;
Some((k, v))
}
}
}
#[derive(PartialEq, Eq)]
pub struct Object(NonNull<ObjectRef>);
impl Object {
pub fn new(p: *mut PyObject) -> Result<Self> {
match NonNull::new(p as *mut ObjectRef) {
Some(p) => Ok(Self(p)),
None => Err(err!("failed to create an object")),
}
}
pub fn new_clone(p: *mut PyObject) -> Result<Self> {
let o = Self::new(p)?;
o.incref();
Ok(o)
}
pub fn new_none() -> Object {
Self::new_clone(unsafe { Py_None() }).unwrap()
}
pub fn new_bool(b: bool) -> Object {
let ptr = match b {
true => unsafe { Py_True() },
false => unsafe { Py_False() },
};
Self::new_clone(ptr).unwrap()
}
pub fn new_i64(v: i64) -> Result<Object> {
Self::new(unsafe { PyLong_FromLongLong(v) })
}
pub fn new_u64(v: u64) -> Result<Object> {
Self::new(unsafe { PyLong_FromUnsignedLongLong(v) })
}
pub fn new_f64(v: f64) -> Result<Object> {
Self::new(unsafe { PyFloat_FromDouble(v) })
}
pub fn new_str(v: &str) -> Result<Object> {
Self::new(unsafe {
PyUnicode_FromStringAndSize(v.as_ptr() as *const c_char, v.len() as Py_ssize_t)
})
}
pub fn new_bytes(v: &[u8]) -> Result<Object> {
Self::new(unsafe {
PyBytes_FromStringAndSize(v.as_ptr() as *const c_char, v.len() as Py_ssize_t)
})
}
pub fn new_bytearray(v: &[u8]) -> Result<Object> {
Self::new(unsafe {
PyByteArray_FromStringAndSize(v.as_ptr() as *const c_char, v.len() as Py_ssize_t)
})
}
pub fn new_unit() -> Result<Object> {
let tuple_type = ObjectRef::new(cast!(PyTuple_Type))?;
tuple_type.call0()
}
pub fn new_default(s: &Schema) -> Result<Object> {
let obj = match s {
Schema::Bool => ObjectRef::new(cast!(PyBool_Type))?.call0()?,
Schema::Int => ObjectRef::new(cast!(PyLong_Type))?.call0()?,
Schema::Float => ObjectRef::new(cast!(PyFloat_Type))?.call0()?,
Schema::Str => ObjectRef::new(cast!(PyUnicode_Type))?.call0()?,
Schema::Bytes => ObjectRef::new(cast!(PyBytes_Type))?.call0()?,
Schema::ByteArray => ObjectRef::new(cast!(PyByteArray_Type))?.call0()?,
Schema::DateTime => import()?.datetime.call0()?,
Schema::Date => import()?.date.call0()?,
Schema::Time => import()?.time.call0()?,
Schema::Decimal => import()?.decimal.call0()?,
Schema::Uuid => import()?.uuid.call0()?,
Schema::Dict(_) => ObjectRef::new(cast!(PyDict_Type))?.call0()?,
Schema::List(_) => ObjectRef::new(cast!(PyList_Type))?.call0()?,
Schema::Set(_) => ObjectRef::new(cast!(PySet_Type))?.call0()?,
Schema::FrozenSet(_) => ObjectRef::new(cast!(PyFrozenSet_Type))?.call0()?,
Schema::Tuple(_) => bail!("cannot use default construction for `tuple`"),
Schema::Class(c) => c.ty.call0()?,
Schema::Enum(_) => bail!("cannot use default construction for `enum`"),
Schema::Union(_) => bail!("cannot use default construction for `union`"),
Schema::Any(_) => bail!("cannot use default construction for `any`"),
};
Ok(obj)
}
pub fn into_datetime(self) -> Result<Object> {
import()?
.datetime
.get_attr(&ATTR_FROMISOFORMAT)?
.call1(self)
}
pub fn into_date(self) -> Result<Object> {
import()?.date.get_attr(&ATTR_FROMISOFORMAT)?.call1(self)
}
pub fn into_time(self) -> Result<Object> {
import()?.time.get_attr(&ATTR_FROMISOFORMAT)?.call1(self)
}
pub fn into_uuid(self) -> Result<Object> {
import()?.uuid.call1(self)
}
pub fn into_decimal(self) -> Result<Object> {
import()?.decimal.call1(self)
}
pub fn build_list(len: usize) -> Result<ListBuilder> {
ListBuilder::new(len)
}
pub fn build_set() -> Result<SetBuilder> {
SetBuilder::new()
}
pub fn build_dict() -> Result<DictBuilder> {
DictBuilder::new()
}
pub fn build_tuple(len: usize) -> Result<TupleBuilder> {
TupleBuilder::new(len)
}
pub fn into_ptr(self) -> *mut PyObject {
let ptr = self.0.as_ptr();
std::mem::forget(self);
ptr as *mut PyObject
}
pub fn into_opt(self) -> Option<Object> {
if self.is_none() {
None
} else {
Some(self)
}
}
fn incref(&self) {
unsafe { Py_INCREF(self.as_ptr()) }
}
fn decref(&self) {
unsafe { Py_DECREF(self.as_ptr()) }
}
}
impl Deref for Object {
type Target = ObjectRef;
fn deref(&self) -> &Self::Target {
unsafe { self.0.as_ref() }
}
}
impl DerefMut for Object {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { self.0.as_mut() }
}
}
impl AsRef<ObjectRef> for Object {
fn as_ref(&self) -> &ObjectRef {
        self
}
}
impl Clone for Object {
fn clone(&self) -> Self {
self.deref().owned()
}
}
impl Drop for Object {
fn drop(&mut self) {
self.decref()
}
}
impl Debug for Object {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.deref().fmt(f)
}
}
#[derive(Debug, Clone)]
pub struct SetRef<'a>(&'a ObjectRef);
impl<'a> SetRef<'a> {
fn new(obj: &'a ObjectRef) -> Self {
Self(obj)
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
pub fn len(&self) -> usize {
unsafe { PySet_Size(self.0.as_ptr()) as usize }
}
}
#[derive(Debug, Clone)]
pub struct ListRef<'a>(&'a ObjectRef);
impl<'a> ListRef<'a> {
fn new(obj: &'a ObjectRef) -> Self {
Self(obj)
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
pub fn len(&self) -> usize {
unsafe { PyList_Size(self.0.as_ptr()) as usize }
}
pub fn get(&self, index: usize) -> Option<&'a ObjectRef> {
let p = unsafe { PyList_GetItem(self.0.as_ptr(), index as Py_ssize_t) };
if p.is_null() {
None
} else {
Some(ObjectRef::new(p).ok()?)
}
}
}
#[derive(Debug, Clone)]
pub struct TupleRef<'a>(&'a ObjectRef);
impl<'a> TupleRef<'a> {
fn new(args: &'a ObjectRef) -> Self {
Self(args)
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
pub fn len(&self) -> usize {
unsafe { PyTuple_Size(self.0.as_ptr()) as usize }
}
pub fn get(&self, index: usize) -> Result<&'a ObjectRef> {
unsafe { ObjectRef::new(PyTuple_GET_ITEM(self.0.as_ptr(), index as Py_ssize_t)) }
}
}
lazy_static::lazy_static! {
static ref ATTR_ISOFORMAT: AttrStr = AttrStr::new("isoformat");
static ref ATTR_FROMISOFORMAT: AttrStr = AttrStr::new("fromisoformat");
}
#[derive(Debug, Clone)]
pub struct ListBuilder(Object);
impl ListBuilder {
fn new(len: usize) -> Result<Self> {
Ok(Self(objnew!(PyList_New(len as Py_ssize_t))?))
}
pub fn set(&mut self, index: usize, obj: Object) {
unsafe {
// This API steals the pointer, so use `into_ptr`.
PyList_SET_ITEM(self.0.as_ptr(), index as Py_ssize_t, obj.into_ptr());
}
}
pub fn build(self) -> Object {
self.0
}
}
#[derive(Debug, Clone)]
pub struct SetBuilder(Object);
impl SetBuilder {
fn new() -> Result<Self> {
Ok(Self(objnew!(PySet_New(std::ptr::null_mut()))?))
}
pub fn set(&mut self, obj: Object) -> Result<()> {
unsafe {
// This API doesn't steal.
if PySet_Add(self.0.as_ptr(), obj.as_ptr()) != 0 {
bail!("cannot add an item to a set")
}
}
Ok(())
}
pub fn build_frozen(self) -> Result<Object> {
objnew!(PyFrozenSet_New(self.0.as_ptr()))
}
pub fn build(self) -> Object {
self.0
}
}
#[derive(Debug, Clone)]
pub struct DictBuilder(Object);
impl DictBuilder {
fn new() -> Result<Self> {
Ok(Self(objnew!(PyDict_New())?))
}
pub fn set(&mut self, key: Object, value: Object) -> Result<()> {
unsafe {
// This API doesn't steal.
if PyDict_SetItem(self.0.as_ptr(), key.as_ptr(), value.as_ptr()) != 0 {
bail!("cannot set an item to dictionary")
}
}
Ok(())
}
pub fn build(self) -> Object {
self.0
}
}
#[derive(Debug, Clone)]
pub struct TupleBuilder(Object);
impl TupleBuilder {
fn new(len: usize) -> Result<Self> {
Ok(Self(objnew!(PyTuple_New(len as Py_ssize_t))?))
}
pub fn set(&mut self, index: usize, obj: Object) {
unsafe {
// This API steals the pointer, so use `into_ptr`.
PyTuple_SET_ITEM(self.0.as_ptr(), index as Py_ssize_t, obj.into_ptr());
}
}
pub fn build(self) -> Object {
self.0
}
}
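// Usage sketch for the builders above (illustrative; assumes an initialized
// interpreter, the GIL held, and a surrounding function returning `Result`):
//
//     let mut list = Object::build_list(2)?;
//     list.set(0, Object::new_i64(1)?);
//     list.set(1, Object::new_str("two")?);
//     let list_obj: Object = list.build();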
pub struct SyncObject(AtomicPtr<PyObject>);
impl SyncObject {
pub fn new(obj: Object) -> Self {
Self(AtomicPtr::new(obj.into_ptr()))
}
pub fn into_ptr(self) -> *mut PyObject {
self.as_ptr()
}
}
impl From<Object> for SyncObject {
fn from(obj: Object) -> Self {
Self::new(obj)
}
}
impl Deref for SyncObject {
type Target = ObjectRef;
fn deref(&self) -> &Self::Target {
ObjectRef::new(self.0.load(Ordering::Relaxed)).unwrap()
}
}
impl PartialEq for SyncObject {
fn eq(&self, other: &SyncObject) -> bool {
self.as_ptr() == other.as_ptr()
}
}
impl Eq for SyncObject {}
impl Clone for SyncObject {
fn clone(&self) -> Self {
Self::new(self.owned())
}
}
impl Drop for SyncObject {
fn drop(&mut self) {
let _ = Object::new(self.as_ptr());
}
}
impl Debug for SyncObject {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.deref().fmt(f)
}
}
#[derive(Debug)]
pub struct ErrorObject {
ptype: SyncObject,
pvalue: SyncObject,
ptraceback: SyncObject,
}
impl ErrorObject {
pub fn new() -> Option<Self> {
if unsafe { PyErr_Occurred().is_null() } {
return None;
}
unsafe {
let mut ptype = std::ptr::null_mut();
let mut pvalue = std::ptr::null_mut();
let mut ptraceback = std::ptr::null_mut();
pyo3::ffi::PyErr_Fetch(&mut ptype, &mut pvalue, &mut ptraceback);
let ptype = Object::new(ptype);
let pvalue = Object::new(pvalue);
let ptraceback = Object::new(ptraceback);
Some(ErrorObject {
ptype: ptype.ok()?.into(),
pvalue: pvalue.ok()?.into(),
ptraceback: ptraceback.ok()?.into(),
})
}
}
pub fn restore(self) {
unsafe {
pyo3::ffi::PyErr_Restore(
self.ptype.into_ptr(),
self.pvalue.into_ptr(),
self.ptraceback.into_ptr(),
)
}
}
pub fn clear() {
if unsafe { !pyo3::ffi::PyErr_Occurred().is_null() } {
unsafe { pyo3::ffi::PyErr_Clear() };
}
}
}
| 25.420455 | 93 | 0.517414 |
aba100ed7ccbc658d00002cac9f33da865d2d450 | 1,636 | /// This Connector obtains the nanoErg per 1 USD rate and submits it
/// to an oracle core. It reads the `oracle-config.yaml` to find the port
/// of the oracle core (via Connector-Lib) and submits it to the POST API
/// server on the core.
/// Note: The value that is posted on-chain is the number
/// of nanoErgs per 1 USD, not the rate per nanoErg.
use anyhow::{anyhow, Result};
use frontend_connector_lib::FrontendConnector;
// Number of nanoErgs in a single Erg
static NANO_ERG_CONVERSION: f64 = 1000000000.0;
static CG_RATE_URL: &str =
"https://api.coingecko.com/api/v3/simple/price?ids=ergo&vs_currencies=USD";
/// Get the Erg/USD price from the nanoErgs per 1 USD datapoint price
pub fn generate_current_price(datapoint: u64) -> f64 {
(1.0 / datapoint as f64) * NANO_ERG_CONVERSION
}
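// Worked example (hypothetical datapoint): 10_000_000 nanoErgs per 1 USD
// corresponds to (1.0 / 10_000_000.0) * NANO_ERG_CONVERSION ≈ 100.0 USD/Erg:
//
//     let price = generate_current_price(10_000_000);
//     assert!((price - 100.0).abs() < 1e-9);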
/// Acquires the price of Ergs in USD from CoinGecko, convert it
/// into nanoErgs per 1 USD, and return it.
fn get_nanoerg_usd_price() -> Result<u64> {
let resp = reqwest::blocking::Client::new().get(CG_RATE_URL).send()?;
let price_json = json::parse(&resp.text()?)?;
if let Some(p) = price_json["ergo"]["usd"].as_f64() {
// Convert from price Erg/USD to nanoErgs per 1 USD
let nanoerg_price = (1.0 / p) * NANO_ERG_CONVERSION;
        Ok(nanoerg_price as u64)
} else {
Err(anyhow!("Failed to parse price from json."))
}
}
fn main() {
// Create the FrontendConnector
let connector = FrontendConnector::new_basic_connector(
"Erg-USD",
get_nanoerg_usd_price,
generate_current_price,
);
// Start the FrontendConnector
connector.run();
}
| 35.565217 | 79 | 0.687042 |
1e049068e66308027f4a5cda8be4d90f4ee605cd | 677 | use clap::*;
use std::io::BufRead;
fn main() {
let app = app_from_crate!()
.arg(Arg::from_usage("[NUM] 'print the tail NUM lines'").default_value("10"));
let matches = app.get_matches();
let num = matches.value_of("NUM").unwrap();
let num = match num.parse::<usize>() {
Ok(n) => n,
Err(_) => {
println!("error: NUM must be an integer");
std::process::exit(1);
}
};
let stdin = std::io::stdin();
let handle = stdin.lock();
let lines: Vec<String> = handle.lines().map(|l| l.unwrap()).collect();
    for line in lines.iter().skip(lines.len().saturating_sub(num)) {
println!("{}", line);
}
}
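// Example invocation (illustrative; assumes the built binary is named `tail`):
//
//     $ seq 1 100 | tail 3
//     98
//     99
//     100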
| 27.08 | 86 | 0.530281 |
5b1710a2674cc4dd5869b7c0a8479e659022edaa | 9,217 | use clap::{crate_version, App, Arg};
use serde::{Deserialize, Serialize};
use serde_json::Result;
use panoptes_bpf_loader_program::{
create_vm, serialization::serialize_parameters, syscalls::register_syscalls, BpfError,
ThisInstructionMeter,
};
use panoptes_rbpf::{
assembler::assemble,
static_analysis::Analysis,
verifier::check,
vm::{Config, DynamicAnalysis, Executable},
};
use panoptes_sdk::{
account::AccountSharedData,
bpf_loader,
keyed_account::KeyedAccount,
process_instruction::{InvokeContext, MockInvokeContext},
pubkey::Pubkey,
};
use std::{cell::RefCell, fs::File, io::Read, io::Seek, io::SeekFrom, path::Path};
#[derive(Serialize, Deserialize, Debug)]
struct Account {
lamports: u64,
data: Vec<u8>,
owner: Pubkey,
}
#[derive(Serialize, Deserialize)]
struct Input {
accounts: Vec<Account>,
insndata: Vec<u8>,
}
fn load_accounts(path: &Path) -> Result<Input> {
let file = File::open(path).unwrap();
let input: Input = serde_json::from_reader(file)?;
println!("Program input:");
println!("accounts {:?}", &input.accounts);
println!("insndata {:?}", &input.insndata);
println!("----------------------------------------");
Ok(input)
}
fn main() {
panoptes_logger::setup();
let matches = App::new("Panoptes BPF CLI")
.version(crate_version!())
.author("Panoptes Maintainers <[email protected]>")
.about(
r##"CLI to test and analyze eBPF programs.
The tool executes eBPF programs in a mocked environment.
Some features, such as sysvars syscall and CPI, are not
available for the programs executed by the CLI tool.
The input data for a program execution have to be in JSON format
and the following fields are required
{
"accounts": [
{
"lamports": 1000,
"data": [0, 0, 0, 3],
"owner": [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
}
],
"insndata": []
}
"##,
)
.arg(
Arg::new("PROGRAM")
.about(
"Program file to use. This is either an ELF shared-object file to be executed, \
or an assembly file to be assembled and executed.",
)
.required(true)
.index(1)
)
.arg(
Arg::new("input")
.about(
"Input for the program to run on, where FILE is a name of a JSON file \
with input data, or BYTES is the number of 0-valued bytes to allocate for program parameters",
)
.short('i')
.long("input")
.value_name("FILE / BYTES")
.takes_value(true)
.default_value("0"),
)
.arg(
Arg::new("memory")
.about("Heap memory for the program to run on")
.short('m')
.long("memory")
.value_name("BYTES")
.takes_value(true)
.default_value("0"),
)
.arg(
Arg::new("use")
.about(
"Method of execution to use, where 'cfg' generates Control Flow Graph \
of the program, 'disassembler' dumps disassembled code of the program, 'interpreter' runs \
the program in the virtual machine's interpreter, and 'jit' precompiles the program to \
native machine code before executing it in the virtual machine.",
)
.short('u')
.long("use")
.takes_value(true)
.value_name("VALUE")
.possible_values(&["cfg", "disassembler", "interpreter", "jit"])
.default_value("interpreter"),
)
.arg(
Arg::new("instruction limit")
.about("Limit the number of instructions to execute")
.short('l')
.long("limit")
.takes_value(true)
.value_name("COUNT")
.default_value(&std::i64::MAX.to_string()),
)
.arg(
Arg::new("trace")
.about("Output trace to 'trace.out' file using tracing instrumentation")
.short('t')
.long("trace"),
)
.arg(
Arg::new("profile")
.about("Output profile to 'profile.dot' file using tracing instrumentation")
.short('p')
.long("profile"),
)
.arg(
Arg::new("verify")
.about("Run the verifier before execution or disassembly")
.short('v')
.long("verify"),
)
.get_matches();
let config = Config {
enable_instruction_tracing: matches.is_present("trace") || matches.is_present("profile"),
..Config::default()
};
let mut accounts = Vec::new();
let mut account_refcells = Vec::new();
let default_account = RefCell::new(AccountSharedData::default());
let key = panoptes_sdk::pubkey::new_rand();
let mut mem = match matches.value_of("input").unwrap().parse::<usize>() {
Ok(allocate) => {
accounts.push(KeyedAccount::new(&key, false, &default_account));
vec![0u8; allocate]
}
Err(_) => {
let input = load_accounts(Path::new(matches.value_of("input").unwrap())).unwrap();
for acc in input.accounts {
let asd = AccountSharedData::new_ref(acc.lamports, acc.data.len(), &acc.owner);
asd.borrow_mut().set_data(acc.data);
account_refcells.push(asd);
}
for acc in &account_refcells {
accounts.push(KeyedAccount::new(&key, false, acc));
}
let lid = bpf_loader::id();
let pid = Pubkey::new(&[0u8; 32]);
let mut bytes = serialize_parameters(&lid, &pid, &accounts, &input.insndata).unwrap();
Vec::from(bytes.as_slice_mut())
}
};
let mut invoke_context = MockInvokeContext::new(accounts);
let logger = invoke_context.logger.clone();
let compute_meter = invoke_context.get_compute_meter();
let mut instruction_meter = ThisInstructionMeter { compute_meter };
let program = matches.value_of("PROGRAM").unwrap();
let mut file = File::open(&Path::new(program)).unwrap();
let mut magic = [0u8; 4];
file.read_exact(&mut magic).unwrap();
file.seek(SeekFrom::Start(0)).unwrap();
let mut contents = Vec::new();
file.read_to_end(&mut contents).unwrap();
let mut executable = if magic == [0x7f, 0x45, 0x4c, 0x46] {
<dyn Executable<BpfError, ThisInstructionMeter>>::from_elf(&contents, None, config)
.map_err(|err| format!("Executable constructor failed: {:?}", err))
} else {
assemble::<BpfError, ThisInstructionMeter>(
std::str::from_utf8(contents.as_slice()).unwrap(),
None,
config,
)
}
.unwrap();
if matches.is_present("verify") {
let (_, elf_bytes) = executable.get_text_bytes().unwrap();
check(elf_bytes).unwrap();
}
executable.set_syscall_registry(register_syscalls(&mut invoke_context).unwrap());
executable.jit_compile().unwrap();
let analysis = Analysis::from_executable(executable.as_ref());
match matches.value_of("use") {
Some("cfg") => {
let mut file = File::create("cfg.dot").unwrap();
analysis.visualize_graphically(&mut file, None).unwrap();
return;
}
Some("disassembler") => {
let stdout = std::io::stdout();
analysis.disassemble(&mut stdout.lock()).unwrap();
return;
}
_ => {}
}
let id = bpf_loader::id();
let mut vm = create_vm(&id, executable.as_ref(), &mut mem, &mut invoke_context).unwrap();
let result = if matches.value_of("use").unwrap() == "interpreter" {
vm.execute_program_interpreted(&mut instruction_meter)
} else {
vm.execute_program_jit(&mut instruction_meter)
};
if logger.log.borrow().len() > 0 {
println!("Program output:");
for s in logger.log.borrow_mut().iter() {
println!("{}", s);
}
println!("----------------------------------------");
}
println!("Result: {:?}", result);
println!("Instruction Count: {}", vm.get_total_instruction_count());
if matches.is_present("trace") {
println!("Trace is saved in trace.out");
let mut file = File::create("trace.out").unwrap();
let analysis = Analysis::from_executable(executable.as_ref());
vm.get_tracer().write(&mut file, &analysis).unwrap();
}
if matches.is_present("profile") {
println!("Profile is saved in profile.dot");
let tracer = &vm.get_tracer();
let dynamic_analysis = DynamicAnalysis::new(tracer, &analysis);
let mut file = File::create("profile.dot").unwrap();
analysis
.visualize_graphically(&mut file, Some(&dynamic_analysis))
.unwrap();
}
}
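// Example invocation (illustrative; the binary name is assumed):
//
//     $ panoptes-bpf-cli program.so --input input.json --use jit --trace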
| 36.287402 | 100 | 0.553542 |
16c83a350a0c6c6841cafec787d982bd05217ccb | 401 | use proc_macro2::TokenStream;
use crate::dto_info::DtoKind;
use crate::SealedContainer;
mod expand_request;
use expand_request::expand_request;
mod expand_response;
use expand_response::expand_response;
pub(crate) fn expand(cont: &SealedContainer) -> TokenStream {
match cont.kind {
DtoKind::Request => expand_request(cont),
DtoKind::Response => expand_response(cont),
}
}
| 22.277778 | 61 | 0.735661 |
09df7d32a5ffa902469944b1a320b08eb6455efc | 834 | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
use ::test::run_test;
#[test]
fn ktestb_1() {
run_test(&Instruction { mnemonic: Mnemonic::KTESTB, operand1: Some(Direct(K2)), operand2: Some(Direct(K4)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 249, 153, 212], OperandSize::Dword)
}
#[test]
fn ktestb_2() {
run_test(&Instruction { mnemonic: Mnemonic::KTESTB, operand1: Some(Direct(K3)), operand2: Some(Direct(K1)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 249, 153, 217], OperandSize::Qword)
}
| 43.894737 | 281 | 0.694245 |
fff6820e20446391c0db0c283480e91128087423 | 979 | //! [POST /_matrix/client/r0/account/3pid/bind](https://matrix.org/docs/spec/client_server/r0.6.0#post-matrix-client-r0-account-3pid-bind)
use ruma_api::ruma_api;
use super::IdentityServerInfo;
ruma_api! {
metadata: {
description: "Bind a 3PID to a user's account on an identity server",
method: POST,
name: "bind_3pid",
path: "/_matrix/client/r0/account/3pid/bind",
rate_limited: true,
requires_authentication: true,
}
request: {
/// Client-generated secret string used to protect this session.
pub client_secret: String,
/// The ID server to send the onward request to as a hostname with an
/// appended colon and port number if the port is not the default.
#[serde(flatten)]
pub identity_server_info: IdentityServerInfo,
/// The session identifier given by the identity server.
pub sid: String,
}
response: {}
error: crate::Error
}
| 28.794118 | 138 | 0.6476 |
fb975fd1039a816032e5734631cd1b8d26c7461d | 1,497 | //
// Copyright 2019 Tamas Blummer
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! defiads
#![deny(non_upper_case_globals)]
#![deny(non_camel_case_types)]
#![deny(non_snake_case)]
#![deny(unused_mut)]
#![deny(unused_must_use)]
#![forbid(unsafe_code)]
extern crate snap;
extern crate byteorder;
extern crate serde;
extern crate serde_cbor;
extern crate serde_json;
extern crate rand_distr;
#[macro_use]extern crate serde_derive;
#[cfg(test)]extern crate hex;
extern crate jsonrpc_http_server;
extern crate bitcoin_hashes;
extern crate bitcoin;
extern crate bitcoin_wallet;
extern crate siphasher;
extern crate secp256k1;
extern crate rand;
extern crate murmel;
#[macro_use]extern crate log;
extern crate rusqlite;
mod error;
mod text;
mod ad;
mod iblt;
mod messages;
mod content;
mod discovery;
mod blockdownload;
pub mod trunk;
pub mod sendtx;
pub mod wallet;
pub mod api;
pub mod find_peers;
pub mod store;
pub mod db;
pub mod updater;
pub mod p2p_bitcoin;
pub mod p2p_defiads;
| 23.390625 | 75 | 0.766199 |
dec44401d9ba44cc7920b3b80af1644d04a871d7 | 7,879 | //! Runtime support needed for testing the stdsimd crate.
//!
//! This basically just disassembles the current executable and then parses the
//! output once globally and then provides the `assert` function which makes
//! assertions about the disassembly of a function.
#![cfg_attr(
feature = "cargo-clippy",
allow(clippy::missing_docs_in_private_items, clippy::print_stdout)
)]
extern crate assert_instr_macro;
extern crate backtrace;
extern crate cc;
#[macro_use]
extern crate lazy_static;
extern crate rustc_demangle;
extern crate simd_test_macro;
#[macro_use]
extern crate cfg_if;
pub use assert_instr_macro::*;
pub use simd_test_macro::*;
use std::{collections::HashMap, env, str};
// println! doesn't work on wasm32 right now, so shadow the compiler's println!
// macro with our own shim that redirects to `console.log`.
#[allow(unused)]
#[cfg(target_arch = "wasm32")]
#[macro_export]
macro_rules! println {
($($args:tt)*) => (wasm::js_console_log(&format!($($args)*)))
}
cfg_if! {
if #[cfg(target_arch = "wasm32")] {
extern crate wasm_bindgen;
extern crate console_error_panic_hook;
pub mod wasm;
use wasm::disassemble_myself;
} else {
mod disassembly;
use disassembly::disassemble_myself;
}
}
lazy_static! {
static ref DISASSEMBLY: HashMap<String, Vec<Function>> = disassemble_myself();
}
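// Disassembling the whole test binary is expensive, so it is done once and
// cached here, keyed by normalized symbol name.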
struct Function {
addr: Option<usize>,
instrs: Vec<Instruction>,
}
struct Instruction {
parts: Vec<String>,
}
fn normalize(symbol: &str) -> String {
let symbol = rustc_demangle::demangle(symbol).to_string();
let mut ret = match symbol.rfind("::h") {
Some(i) => symbol[..i].to_string(),
None => symbol.to_string(),
};
// Normalize to no leading underscore to handle platforms that may
// inject extra ones in symbol names
    while ret.starts_with('_') {
        ret.remove(0);
    }
    ret
}
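// Illustrative behavior (hypothetical symbol, not from the source): a symbol
// demangling to `_foo::bar::h0123456789abcdef` normalizes to `foo::bar` — the
// `::h...` hash suffix is trimmed and leading underscores are stripped.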
/// Main entry point for this crate, called by the `#[assert_instr]` macro.
///
/// This asserts that the function at `fnptr` contains the instruction
/// `expected` provided.
pub fn assert(fnptr: usize, fnname: &str, expected: &str) {
let mut fnname = fnname.to_string();
let functions = get_functions(fnptr, &mut fnname);
assert_eq!(functions.len(), 1);
let function = &functions[0];
let mut instrs = &function.instrs[..];
while instrs.last().map_or(false, |s| s.parts == ["nop"]) {
instrs = &instrs[..instrs.len() - 1];
}
// Look for `expected` as the first part of any instruction in this
// function, returning if we do indeed find it.
let mut found = false;
for instr in instrs {
// Gets the first instruction, e.g. tzcntl in tzcntl %rax,%rax
if let Some(part) = instr.parts.get(0) {
// Truncates the instruction with the length of the expected
// instruction: tzcntl => tzcnt and compares that.
if part.starts_with(expected) {
found = true;
break;
}
}
}
// Look for `call` instructions in the disassembly to detect whether
// inlining failed: all intrinsics are `#[inline(always)]`, so
// calling one intrinsic from another should not generate `call`
// instructions.
let mut inlining_failed = false;
for (i, instr) in instrs.iter().enumerate() {
let part = match instr.parts.get(0) {
Some(part) => part,
None => continue,
};
if !part.contains("call") {
continue;
}
// On 32-bit x86 position independent code will call itself and be
// immediately followed by a `pop` to learn about the current address.
// Let's not take that into account when considering whether a function
// failed inlining something.
let followed_by_pop = function
.instrs
.get(i + 1)
.and_then(|i| i.parts.get(0))
.map_or(false, |s| s.contains("pop"));
if followed_by_pop && cfg!(target_arch = "x86") {
continue;
}
inlining_failed = true;
break;
}
let instruction_limit = std::env::var("STDSIMD_ASSERT_INSTR_LIMIT")
.ok()
.map_or_else(
            || match expected {
                // cpuid returns a pretty big aggregate structure, so exempt it
                // from the slightly more restrictive 22-instruction limit
                // below.
                "cpuid" => 30,
                // Apparently on Windows LLVM generates a bunch of
                // saves/restores of xmm registers around these instructions,
                // which blows the 20 limit below. As it seems dictated by
                // Windows's ABI (I guess?) we probably can't do much
                // about it...
                "vzeroall" | "vzeroupper" if cfg!(windows) => 30,
                // Intrinsics using `cvtpi2ps` are typically "composites" and
                // in some cases exceed the limit.
                "cvtpi2ps" => 25,
                // The original limit was 20 instructions, but ARM DSP
                // intrinsics are exactly 20 instructions long, so bump the
                // limit to 22 instead of adding a long list of exceptions here.
                _ => 22,
            },
|v| v.parse().unwrap(),
);
let probably_only_one_instruction = instrs.len() < instruction_limit;
if found && probably_only_one_instruction && !inlining_failed {
return;
}
// Help debug by printing out the found disassembly, and then panic as we
// didn't find the instruction.
println!("disassembly for {}: ", fnname,);
for (i, instr) in instrs.iter().enumerate() {
let mut s = format!("\t{:2}: ", i);
for part in &instr.parts {
s.push_str(part);
s.push_str(" ");
}
println!("{}", s);
}
if !found {
panic!(
"failed to find instruction `{}` in the disassembly",
expected
);
} else if !probably_only_one_instruction {
panic!(
"instruction found, but the disassembly contains too many \
instructions: #instructions = {} >= {} (limit)",
instrs.len(),
instruction_limit
);
} else if inlining_failed {
panic!(
"instruction found, but the disassembly contains `call` \
instructions, which hint that inlining failed"
);
}
}
fn get_functions(fnptr: usize, fnname: &mut String) -> &'static [Function] {
// Translate this function pointer to a symbolic name that we'd have found
// in the disassembly.
let mut sym = None;
backtrace::resolve(fnptr as *mut _, |name| {
sym = name.name().and_then(|s| s.as_str()).map(normalize);
});
if let Some(sym) = &sym {
if let Some(s) = DISASSEMBLY.get(sym) {
*fnname = sym.to_string();
return s;
}
}
let exact_match = DISASSEMBLY
.iter()
.find(|(_, list)| list.iter().any(|f| f.addr == Some(fnptr)));
if let Some((name, list)) = exact_match {
*fnname = name.to_string();
return list;
}
if let Some(sym) = sym {
println!("assumed symbol name: `{}`", sym);
}
println!("maybe related functions");
for f in DISASSEMBLY.keys().filter(|k| k.contains(&**fnname)) {
println!("\t- {}", f);
}
panic!("failed to find disassembly of {:#x} ({})", fnptr, fnname);
}
pub fn assert_skip_test_ok(name: &str) {
if env::var("STDSIMD_TEST_EVERYTHING").is_err() {
return;
}
panic!("skipped test `{}` when it shouldn't be skipped", name);
}
// See comment in `assert-instr-macro` crate for why this exists
pub static mut _DONT_DEDUP: &'static str = "";
| 32.290984 | 82 | 0.589288 |
212003fff9dd5e3a3c87d210ae98f60e182b936c | 899 |
#[macro_export]
macro_rules! ignore_prev_enclose {
[( $($enc_args:tt)* ) $($add_prefix:ident)? || $b:block ] => {{ // empty args
$($add_prefix)? || {
$crate::enclose_var! {
$( $enc_args )*
}
$b
}
}};
[( $($enc_args:tt)* ) $($add_prefix:ident)? | $($args:tt),* | $b:block ] => {{ // args
$($add_prefix)? | $($args),* | {
$crate::enclose_var! {
$( $enc_args )*
}
$b
}
}};
[( $($enc_args:tt)* ) $p:tt $(:: $($all:tt)+)? ] => {{
$crate::enclose_var! {
$( $enc_args )*
}
$p $(:: $($all)+)?
}};
[( $($enc_args:tt)* )] => {{ // empty
$crate::enclose_var! {
$( $enc_args )*
}
}};
/*[ $($unk:tt)+ ] => {
compile_error!("Undefined entry or unsupported arguments, please double-check input.");
};*/
[] => {};
}
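// Rough sketch of the intended use (hypothetical input, not from the source):
// `ignore_prev_enclose!((a, b) || { a + b })` first expands `enclose_var!` for
// the listed captures, then emits the closure with those fresh bindings in
// scope.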
#[macro_export]
macro_rules! ignore_prev_enc {
[$($all:tt)*] => {
		$crate::ignore_prev_enclose! {
$($all)*
}
};
} | 17.98 | 89 | 0.471635 |
6af66d5fc9fa8bce3d55585df02d0d66b473a619 | 388 | use beserial::{Deserialize, Serialize};
pub mod account;
pub mod block;
pub mod blockchain;
pub mod mempool;
pub mod primitive;
pub mod transaction;
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Subscription {
}
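// The explicit discriminants and `repr(u8)` suggest these values are meant to
// match a fixed wire encoding (an assumption based on the layout).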
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
#[repr(u8)]
pub enum SubscriptionType {
None = 0,
Any = 1,
Addresses = 2,
MinFee = 3
}
| 16.869565 | 55 | 0.690722 |
8f92a422d5364fdf82f9f5f93b7435a948f57058 | 837 | impl Solution {
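    // "Generate Parentheses": enumerate every well-formed string of n pairs by
    // backtracking on the counts of '(' and ')' still available to place.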
pub fn generate_parenthesis(n: i32) -> Vec<String> {
        let mut res = Vec::new();
        let cur = Vec::new();
        Self::addingpar(n, n, &cur, &mut res);
        res
}
    fn addingpar(left: i32, right: i32, cur: &Vec<u8>, res: &mut Vec<String>) {
        if left == 0 && right == 0 {
            // Only ASCII bytes are ever pushed, so this is valid UTF-8.
            res.push(unsafe { String::from_utf8_unchecked(cur.clone()) });
            return;
        }
        if left > 0 {
            // Place an opening parenthesis while any remain.
            let mut cur = cur.clone();
            cur.push(b'(');
            Self::addingpar(left - 1, right, &cur, res);
        }
        if left < right {
            // A closing parenthesis is valid only if it matches an earlier '('.
            let mut cur = cur.clone();
            cur.push(b')');
            Self::addingpar(left, right - 1, &cur, res);
        }
    }
}
| 26.15625 | 79 | 0.468339 |
8f7e3cfddd2eb7f552ba225908895a14bb2f18dc | 547 | witx_bindgen_rust::export!("crates/flags/flags.witx");
use flags::*;
struct Flags;
impl flags::Flags for Flags {
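    // Each roundtrip below just echoes its flags value back, exercising the
    // generated flag types across the interface boundary.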
fn roundtrip_flag1(x: Flag1) -> Flag1 {
x
}
fn roundtrip_flag2(x: Flag2) -> Flag2 {
x
}
fn roundtrip_flag4(x: Flag4) -> Flag4 {
x
}
fn roundtrip_flag8(x: Flag8) -> Flag8 {
x
}
fn roundtrip_flag16(x: Flag16) -> Flag16 {
x
}
fn roundtrip_flag32(x: Flag32) -> Flag32 {
x
}
fn roundtrip_flag64(x: Flag64) -> Flag64 {
x
}
}
| 18.233333 | 54 | 0.541133 |
db6281322c27fad8d3117c4366beb17db08533ae | 56,100 | #![allow(non_camel_case_types)]
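// Each packed struct below describes one vertex-attribute layout, and its
// `vertex_attrib_pointer` both enables the attribute and registers the field
// with OpenGL. Illustrative call site (hypothetical vertex struct, not from
// the source), inside an `unsafe` block with the VAO/VBO bound:
//
//     f32_f32_f32::vertex_attrib_pointer(&gl, std::mem::size_of::<Vertex>(), 0, 0);
//     u8_u8_u8_u8_float::vertex_attrib_pointer(&gl, std::mem::size_of::<Vertex>(), 1, 12);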
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i8_ {
pub d0: i8,
}
impl i8_ {
pub fn new(d0: i8) -> i8_ {
i8_ { d0 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
1, // the number of components per generic vertex attribute
gl::BYTE, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<i8> for i8_ {
fn from(other: i8) -> Self {
i8_::new(other)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i8_i8 {
pub d0: i8,
pub d1: i8,
}
impl i8_i8 {
pub fn new(d0: i8, d1: i8) -> i8_i8 {
i8_i8 { d0, d1 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
2, // the number of components per generic vertex attribute
gl::BYTE, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(i8, i8)> for i8_i8 {
fn from(other: (i8, i8)) -> Self {
i8_i8::new(other.0, other.1)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i8_i8_i8 {
pub d0: i8,
pub d1: i8,
pub d2: i8,
}
impl i8_i8_i8 {
pub fn new(d0: i8, d1: i8, d2: i8) -> i8_i8_i8 {
i8_i8_i8 { d0, d1, d2 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
        gl.EnableVertexAttribArray(location as gl::types::GLuint);
        gl.VertexAttribIPointer(
            location as gl::types::GLuint,
            3, // the number of components per generic vertex attribute
            gl::BYTE, // data type
            stride as gl::types::GLint,
            offset as *const gl::types::GLvoid,
        );
}
}
impl From<(i8, i8, i8)> for i8_i8_i8 {
fn from(other: (i8, i8, i8)) -> Self {
i8_i8_i8::new(other.0, other.1, other.2)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i8_i8_i8_i8 {
pub d0: i8,
pub d1: i8,
pub d2: i8,
pub d3: i8,
}
impl i8_i8_i8_i8 {
pub fn new(d0: i8, d1: i8, d2: i8, d3: i8) -> i8_i8_i8_i8 {
i8_i8_i8_i8 { d0, d1, d2, d3 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
4, // the number of components per generic vertex attribute
gl::BYTE, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(i8, i8, i8, i8)> for i8_i8_i8_i8 {
fn from(other: (i8, i8, i8, i8)) -> Self {
i8_i8_i8_i8::new(other.0, other.1, other.2, other.3)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i8_float {
pub d0: i8,
}
impl i8_float {
pub fn new(d0: i8) -> i8_float {
i8_float { d0 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
1, // the number of components per generic vertex attribute
gl::BYTE, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<i8> for i8_float {
fn from(other: i8) -> Self {
i8_float::new(other)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i8_i8_float {
pub d0: i8,
pub d1: i8,
}
impl i8_i8_float {
pub fn new(d0: i8, d1: i8) -> i8_i8_float {
i8_i8_float { d0, d1 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
2, // the number of components per generic vertex attribute
gl::BYTE, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(i8, i8)> for i8_i8_float {
fn from(other: (i8, i8)) -> Self {
i8_i8_float::new(other.0, other.1)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i8_i8_i8_float {
pub d0: i8,
pub d1: i8,
pub d2: i8,
}
impl i8_i8_i8_float {
pub fn new(d0: i8, d1: i8, d2: i8) -> i8_i8_i8_float {
i8_i8_i8_float { d0, d1, d2 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
3, // the number of components per generic vertex attribute
gl::BYTE, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(i8, i8, i8)> for i8_i8_i8_float {
fn from(other: (i8, i8, i8)) -> Self {
i8_i8_i8_float::new(other.0, other.1, other.2)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i8_i8_i8_i8_float {
pub d0: i8,
pub d1: i8,
pub d2: i8,
pub d3: i8,
}
impl i8_i8_i8_i8_float {
pub fn new(d0: i8, d1: i8, d2: i8, d3: i8) -> i8_i8_i8_i8_float {
i8_i8_i8_i8_float { d0, d1, d2, d3 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
4, // the number of components per generic vertex attribute
gl::BYTE, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(i8, i8, i8, i8)> for i8_i8_i8_i8_float {
fn from(other: (i8, i8, i8, i8)) -> Self {
i8_i8_i8_i8_float::new(other.0, other.1, other.2, other.3)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i16_ {
pub d0: i16,
}
impl i16_ {
pub fn new(d0: i16) -> i16_ {
i16_ { d0 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
1, // the number of components per generic vertex attribute
gl::SHORT, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<i16> for i16_ {
fn from(other: i16) -> Self {
i16_::new(other)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i16_i16 {
pub d0: i16,
pub d1: i16,
}
impl i16_i16 {
pub fn new(d0: i16, d1: i16) -> i16_i16 {
i16_i16 { d0, d1 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
2, // the number of components per generic vertex attribute
gl::SHORT, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(i16, i16)> for i16_i16 {
fn from(other: (i16, i16)) -> Self {
i16_i16::new(other.0, other.1)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i16_i16_i16 {
pub d0: i16,
pub d1: i16,
pub d2: i16,
}
impl i16_i16_i16 {
pub fn new(d0: i16, d1: i16, d2: i16) -> i16_i16_i16 {
i16_i16_i16 { d0, d1, d2 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
3, // the number of components per generic vertex attribute
gl::SHORT, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(i16, i16, i16)> for i16_i16_i16 {
fn from(other: (i16, i16, i16)) -> Self {
i16_i16_i16::new(other.0, other.1, other.2)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i16_i16_i16_i16 {
pub d0: i16,
pub d1: i16,
pub d2: i16,
pub d3: i16,
}
impl i16_i16_i16_i16 {
pub fn new(d0: i16, d1: i16, d2: i16, d3: i16) -> i16_i16_i16_i16 {
i16_i16_i16_i16 { d0, d1, d2, d3 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
4, // the number of components per generic vertex attribute
gl::SHORT, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(i16, i16, i16, i16)> for i16_i16_i16_i16 {
fn from(other: (i16, i16, i16, i16)) -> Self {
i16_i16_i16_i16::new(other.0, other.1, other.2, other.3)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i16_float {
pub d0: i16,
}
impl i16_float {
pub fn new(d0: i16) -> i16_float {
i16_float { d0 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
1, // the number of components per generic vertex attribute
gl::SHORT, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<i16> for i16_float {
fn from(other: i16) -> Self {
i16_float::new(other)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i16_i16_float {
pub d0: i16,
pub d1: i16,
}
impl i16_i16_float {
pub fn new(d0: i16, d1: i16) -> i16_i16_float {
i16_i16_float { d0, d1 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
2, // the number of components per generic vertex attribute
gl::SHORT, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(i16, i16)> for i16_i16_float {
fn from(other: (i16, i16)) -> Self {
i16_i16_float::new(other.0, other.1)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i16_i16_i16_float {
pub d0: i16,
pub d1: i16,
pub d2: i16,
}
impl i16_i16_i16_float {
pub fn new(d0: i16, d1: i16, d2: i16) -> i16_i16_i16_float {
i16_i16_i16_float { d0, d1, d2 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
3, // the number of components per generic vertex attribute
gl::SHORT, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(i16, i16, i16)> for i16_i16_i16_float {
fn from(other: (i16, i16, i16)) -> Self {
i16_i16_i16_float::new(other.0, other.1, other.2)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i16_i16_i16_i16_float {
pub d0: i16,
pub d1: i16,
pub d2: i16,
pub d3: i16,
}
impl i16_i16_i16_i16_float {
pub fn new(d0: i16, d1: i16, d2: i16, d3: i16) -> i16_i16_i16_i16_float {
i16_i16_i16_i16_float { d0, d1, d2, d3 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
4, // the number of components per generic vertex attribute
gl::SHORT, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(i16, i16, i16, i16)> for i16_i16_i16_i16_float {
fn from(other: (i16, i16, i16, i16)) -> Self {
i16_i16_i16_i16_float::new(other.0, other.1, other.2, other.3)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i32_ {
pub d0: i32,
}
impl i32_ {
pub fn new(d0: i32) -> i32_ {
i32_ { d0 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
1, // the number of components per generic vertex attribute
gl::INT, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<i32> for i32_ {
fn from(other: i32) -> Self {
i32_::new(other)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i32_i32 {
pub d0: i32,
pub d1: i32,
}
impl i32_i32 {
pub fn new(d0: i32, d1: i32) -> i32_i32 {
i32_i32 { d0, d1 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
2, // the number of components per generic vertex attribute
gl::INT, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(i32, i32)> for i32_i32 {
fn from(other: (i32, i32)) -> Self {
i32_i32::new(other.0, other.1)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i32_i32_i32 {
pub d0: i32,
pub d1: i32,
pub d2: i32,
}
impl i32_i32_i32 {
pub fn new(d0: i32, d1: i32, d2: i32) -> i32_i32_i32 {
i32_i32_i32 { d0, d1, d2 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
3, // the number of components per generic vertex attribute
gl::INT, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(i32, i32, i32)> for i32_i32_i32 {
fn from(other: (i32, i32, i32)) -> Self {
i32_i32_i32::new(other.0, other.1, other.2)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i32_i32_i32_i32 {
pub d0: i32,
pub d1: i32,
pub d2: i32,
pub d3: i32,
}
impl i32_i32_i32_i32 {
pub fn new(d0: i32, d1: i32, d2: i32, d3: i32) -> i32_i32_i32_i32 {
i32_i32_i32_i32 { d0, d1, d2, d3 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
4, // the number of components per generic vertex attribute
gl::INT, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(i32, i32, i32, i32)> for i32_i32_i32_i32 {
fn from(other: (i32, i32, i32, i32)) -> Self {
i32_i32_i32_i32::new(other.0, other.1, other.2, other.3)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i32_float {
pub d0: i32,
}
impl i32_float {
pub fn new(d0: i32) -> i32_float {
i32_float { d0 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
1, // the number of components per generic vertex attribute
gl::INT, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<i32> for i32_float {
fn from(other: i32) -> Self {
i32_float::new(other)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i32_i32_float {
pub d0: i32,
pub d1: i32,
}
impl i32_i32_float {
pub fn new(d0: i32, d1: i32) -> i32_i32_float {
i32_i32_float { d0, d1 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
2, // the number of components per generic vertex attribute
gl::INT, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(i32, i32)> for i32_i32_float {
fn from(other: (i32, i32)) -> Self {
i32_i32_float::new(other.0, other.1)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i32_i32_i32_float {
pub d0: i32,
pub d1: i32,
pub d2: i32,
}
impl i32_i32_i32_float {
pub fn new(d0: i32, d1: i32, d2: i32) -> i32_i32_i32_float {
i32_i32_i32_float { d0, d1, d2 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
3, // the number of components per generic vertex attribute
gl::INT, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(i32, i32, i32)> for i32_i32_i32_float {
fn from(other: (i32, i32, i32)) -> Self {
i32_i32_i32_float::new(other.0, other.1, other.2)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct i32_i32_i32_i32_float {
pub d0: i32,
pub d1: i32,
pub d2: i32,
pub d3: i32,
}
impl i32_i32_i32_i32_float {
pub fn new(d0: i32, d1: i32, d2: i32, d3: i32) -> i32_i32_i32_i32_float {
i32_i32_i32_i32_float { d0, d1, d2, d3 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
4, // the number of components per generic vertex attribute
gl::INT, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(i32, i32, i32, i32)> for i32_i32_i32_i32_float {
fn from(other: (i32, i32, i32, i32)) -> Self {
i32_i32_i32_i32_float::new(other.0, other.1, other.2, other.3)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u8_ {
pub d0: u8,
}
impl u8_ {
pub fn new(d0: u8) -> u8_ {
u8_ { d0 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
1, // the number of components per generic vertex attribute
gl::UNSIGNED_BYTE, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<u8> for u8_ {
fn from(other: u8) -> Self {
u8_::new(other)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u8_u8 {
pub d0: u8,
pub d1: u8,
}
impl u8_u8 {
pub fn new(d0: u8, d1: u8) -> u8_u8 {
u8_u8 { d0, d1 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
2, // the number of components per generic vertex attribute
gl::UNSIGNED_BYTE, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(u8, u8)> for u8_u8 {
fn from(other: (u8, u8)) -> Self {
u8_u8::new(other.0, other.1)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u8_u8_u8 {
pub d0: u8,
pub d1: u8,
pub d2: u8,
}
impl u8_u8_u8 {
pub fn new(d0: u8, d1: u8, d2: u8) -> u8_u8_u8 {
u8_u8_u8 { d0, d1, d2 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
3, // the number of components per generic vertex attribute
gl::UNSIGNED_BYTE, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(u8, u8, u8)> for u8_u8_u8 {
fn from(other: (u8, u8, u8)) -> Self {
u8_u8_u8::new(other.0, other.1, other.2)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u8_u8_u8_u8 {
pub d0: u8,
pub d1: u8,
pub d2: u8,
pub d3: u8,
}
impl u8_u8_u8_u8 {
pub fn new(d0: u8, d1: u8, d2: u8, d3: u8) -> u8_u8_u8_u8 {
u8_u8_u8_u8 { d0, d1, d2, d3 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
4, // the number of components per generic vertex attribute
gl::UNSIGNED_BYTE, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(u8, u8, u8, u8)> for u8_u8_u8_u8 {
fn from(other: (u8, u8, u8, u8)) -> Self {
u8_u8_u8_u8::new(other.0, other.1, other.2, other.3)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u8_float {
pub d0: u8,
}
impl u8_float {
pub fn new(d0: u8) -> u8_float {
u8_float { d0 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
1, // the number of components per generic vertex attribute
gl::UNSIGNED_BYTE, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<u8> for u8_float {
fn from(other: u8) -> Self {
u8_float::new(other)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u8_u8_float {
pub d0: u8,
pub d1: u8,
}
impl u8_u8_float {
pub fn new(d0: u8, d1: u8) -> u8_u8_float {
u8_u8_float { d0, d1 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
2, // the number of components per generic vertex attribute
gl::UNSIGNED_BYTE, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(u8, u8)> for u8_u8_float {
fn from(other: (u8, u8)) -> Self {
u8_u8_float::new(other.0, other.1)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u8_u8_u8_float {
pub d0: u8,
pub d1: u8,
pub d2: u8,
}
impl u8_u8_u8_float {
pub fn new(d0: u8, d1: u8, d2: u8) -> u8_u8_u8_float {
u8_u8_u8_float { d0, d1, d2 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
3, // the number of components per generic vertex attribute
gl::UNSIGNED_BYTE, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(u8, u8, u8)> for u8_u8_u8_float {
fn from(other: (u8, u8, u8)) -> Self {
u8_u8_u8_float::new(other.0, other.1, other.2)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u8_u8_u8_u8_float {
pub d0: u8,
pub d1: u8,
pub d2: u8,
pub d3: u8,
}
impl u8_u8_u8_u8_float {
pub fn new(d0: u8, d1: u8, d2: u8, d3: u8) -> u8_u8_u8_u8_float {
u8_u8_u8_u8_float { d0, d1, d2, d3 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
4, // the number of components per generic vertex attribute
gl::UNSIGNED_BYTE, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(u8, u8, u8, u8)> for u8_u8_u8_u8_float {
fn from(other: (u8, u8, u8, u8)) -> Self {
u8_u8_u8_u8_float::new(other.0, other.1, other.2, other.3)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u16_ {
pub d0: u16,
}
impl u16_ {
pub fn new(d0: u16) -> u16_ {
u16_ { d0 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
1, // the number of components per generic vertex attribute
gl::UNSIGNED_SHORT, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<u16> for u16_ {
fn from(other: u16) -> Self {
u16_::new(other)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u16_u16 {
pub d0: u16,
pub d1: u16,
}
impl u16_u16 {
pub fn new(d0: u16, d1: u16) -> u16_u16 {
u16_u16 { d0, d1 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
2, // the number of components per generic vertex attribute
gl::UNSIGNED_SHORT, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(u16, u16)> for u16_u16 {
fn from(other: (u16, u16)) -> Self {
u16_u16::new(other.0, other.1)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u16_u16_u16 {
pub d0: u16,
pub d1: u16,
pub d2: u16,
}
impl u16_u16_u16 {
pub fn new(d0: u16, d1: u16, d2: u16) -> u16_u16_u16 {
u16_u16_u16 { d0, d1, d2 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
3, // the number of components per generic vertex attribute
gl::UNSIGNED_SHORT, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(u16, u16, u16)> for u16_u16_u16 {
fn from(other: (u16, u16, u16)) -> Self {
u16_u16_u16::new(other.0, other.1, other.2)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u16_u16_u16_u16 {
pub d0: u16,
pub d1: u16,
pub d2: u16,
pub d3: u16,
}
impl u16_u16_u16_u16 {
pub fn new(d0: u16, d1: u16, d2: u16, d3: u16) -> u16_u16_u16_u16 {
u16_u16_u16_u16 { d0, d1, d2, d3 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
4, // the number of components per generic vertex attribute
gl::UNSIGNED_SHORT, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(u16, u16, u16, u16)> for u16_u16_u16_u16 {
fn from(other: (u16, u16, u16, u16)) -> Self {
u16_u16_u16_u16::new(other.0, other.1, other.2, other.3)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u16_float {
pub d0: u16,
}
impl u16_float {
pub fn new(d0: u16) -> u16_float {
u16_float { d0 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
1, // the number of components per generic vertex attribute
gl::UNSIGNED_SHORT, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<u16> for u16_float {
fn from(other: u16) -> Self {
u16_float::new(other)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u16_u16_float {
pub d0: u16,
pub d1: u16,
}
impl u16_u16_float {
pub fn new(d0: u16, d1: u16) -> u16_u16_float {
u16_u16_float { d0, d1 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
2, // the number of components per generic vertex attribute
gl::UNSIGNED_SHORT, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(u16, u16)> for u16_u16_float {
fn from(other: (u16, u16)) -> Self {
u16_u16_float::new(other.0, other.1)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u16_u16_u16_float {
pub d0: u16,
pub d1: u16,
pub d2: u16,
}
impl u16_u16_u16_float {
pub fn new(d0: u16, d1: u16, d2: u16) -> u16_u16_u16_float {
u16_u16_u16_float { d0, d1, d2 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
3, // the number of components per generic vertex attribute
gl::UNSIGNED_SHORT, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(u16, u16, u16)> for u16_u16_u16_float {
fn from(other: (u16, u16, u16)) -> Self {
u16_u16_u16_float::new(other.0, other.1, other.2)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u16_u16_u16_u16_float {
pub d0: u16,
pub d1: u16,
pub d2: u16,
pub d3: u16,
}
impl u16_u16_u16_u16_float {
pub fn new(d0: u16, d1: u16, d2: u16, d3: u16) -> u16_u16_u16_u16_float {
u16_u16_u16_u16_float { d0, d1, d2, d3 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
4, // the number of components per generic vertex attribute
gl::UNSIGNED_SHORT, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(u16, u16, u16, u16)> for u16_u16_u16_u16_float {
fn from(other: (u16, u16, u16, u16)) -> Self {
u16_u16_u16_u16_float::new(other.0, other.1, other.2, other.3)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u32_ {
pub d0: u32,
}
impl u32_ {
pub fn new(d0: u32) -> u32_ {
u32_ { d0 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
1, // the number of components per generic vertex attribute
gl::UNSIGNED_INT, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<u32> for u32_ {
fn from(other: u32) -> Self {
u32_::new(other)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u32_u32 {
pub d0: u32,
pub d1: u32,
}
impl u32_u32 {
pub fn new(d0: u32, d1: u32) -> u32_u32 {
u32_u32 { d0, d1 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
2, // the number of components per generic vertex attribute
gl::UNSIGNED_INT, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(u32, u32)> for u32_u32 {
fn from(other: (u32, u32)) -> Self {
u32_u32::new(other.0, other.1)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u32_u32_u32 {
pub d0: u32,
pub d1: u32,
pub d2: u32,
}
impl u32_u32_u32 {
pub fn new(d0: u32, d1: u32, d2: u32) -> u32_u32_u32 {
u32_u32_u32 { d0, d1, d2 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
3, // the number of components per generic vertex attribute
gl::UNSIGNED_INT, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(u32, u32, u32)> for u32_u32_u32 {
fn from(other: (u32, u32, u32)) -> Self {
u32_u32_u32::new(other.0, other.1, other.2)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u32_u32_u32_u32 {
pub d0: u32,
pub d1: u32,
pub d2: u32,
pub d3: u32,
}
impl u32_u32_u32_u32 {
pub fn new(d0: u32, d1: u32, d2: u32, d3: u32) -> u32_u32_u32_u32 {
u32_u32_u32_u32 { d0, d1, d2, d3 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribIPointer(
location as gl::types::GLuint,
4, // the number of components per generic vertex attribute
gl::UNSIGNED_INT, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(u32, u32, u32, u32)> for u32_u32_u32_u32 {
fn from(other: (u32, u32, u32, u32)) -> Self {
u32_u32_u32_u32::new(other.0, other.1, other.2, other.3)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u32_float {
pub d0: u32,
}
impl u32_float {
pub fn new(d0: u32) -> u32_float {
u32_float { d0 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
1, // the number of components per generic vertex attribute
gl::UNSIGNED_INT, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<u32> for u32_float {
fn from(other: u32) -> Self {
u32_float::new(other)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u32_u32_float {
pub d0: u32,
pub d1: u32,
}
impl u32_u32_float {
pub fn new(d0: u32, d1: u32) -> u32_u32_float {
u32_u32_float { d0, d1 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
2, // the number of components per generic vertex attribute
gl::UNSIGNED_INT, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(u32, u32)> for u32_u32_float {
fn from(other: (u32, u32)) -> Self {
u32_u32_float::new(other.0, other.1)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u32_u32_u32_float {
pub d0: u32,
pub d1: u32,
pub d2: u32,
}
impl u32_u32_u32_float {
pub fn new(d0: u32, d1: u32, d2: u32) -> u32_u32_u32_float {
u32_u32_u32_float { d0, d1, d2 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
3, // the number of components per generic vertex attribute
gl::UNSIGNED_INT, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(u32, u32, u32)> for u32_u32_u32_float {
fn from(other: (u32, u32, u32)) -> Self {
u32_u32_u32_float::new(other.0, other.1, other.2)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct u32_u32_u32_u32_float {
pub d0: u32,
pub d1: u32,
pub d2: u32,
pub d3: u32,
}
impl u32_u32_u32_u32_float {
pub fn new(d0: u32, d1: u32, d2: u32, d3: u32) -> u32_u32_u32_u32_float {
u32_u32_u32_u32_float { d0, d1, d2, d3 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
4, // the number of components per generic vertex attribute
gl::UNSIGNED_INT, // data type
gl::TRUE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(u32, u32, u32, u32)> for u32_u32_u32_u32_float {
fn from(other: (u32, u32, u32, u32)) -> Self {
u32_u32_u32_u32_float::new(other.0, other.1, other.2, other.3)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct f32_ {
pub d0: f32,
}
impl f32_ {
pub fn new(d0: f32) -> f32_ {
f32_ { d0 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
1, // the number of components per generic vertex attribute
gl::FLOAT, // data type
gl::FALSE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<f32> for f32_ {
fn from(other: f32) -> Self {
f32_::new(other)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct f32_f32 {
pub d0: f32,
pub d1: f32,
}
impl f32_f32 {
pub fn new(d0: f32, d1: f32) -> f32_f32 {
f32_f32 { d0, d1 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
2, // the number of components per generic vertex attribute
gl::FLOAT, // data type
gl::FALSE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(f32, f32)> for f32_f32 {
fn from(other: (f32, f32)) -> Self {
f32_f32::new(other.0, other.1)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct f32_f32_f32 {
pub d0: f32,
pub d1: f32,
pub d2: f32,
}
impl f32_f32_f32 {
pub fn new(d0: f32, d1: f32, d2: f32) -> f32_f32_f32 {
f32_f32_f32 { d0, d1, d2 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
3, // the number of components per generic vertex attribute
gl::FLOAT, // data type
gl::FALSE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(f32, f32, f32)> for f32_f32_f32 {
fn from(other: (f32, f32, f32)) -> Self {
f32_f32_f32::new(other.0, other.1, other.2)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct f32_f32_f32_f32 {
pub d0: f32,
pub d1: f32,
pub d2: f32,
pub d3: f32,
}
impl f32_f32_f32_f32 {
pub fn new(d0: f32, d1: f32, d2: f32, d3: f32) -> f32_f32_f32_f32 {
f32_f32_f32_f32 { d0, d1, d2, d3 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribPointer(
location as gl::types::GLuint,
4, // the number of components per generic vertex attribute
gl::FLOAT, // data type
gl::FALSE, // normalized (int-to-float conversion)
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(f32, f32, f32, f32)> for f32_f32_f32_f32 {
fn from(other: (f32, f32, f32, f32)) -> Self {
f32_f32_f32_f32::new(other.0, other.1, other.2, other.3)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct f64_ {
pub d0: f64,
}
impl f64_ {
pub fn new(d0: f64) -> f64_ {
f64_ { d0 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribLPointer(
location as gl::types::GLuint,
1, // the number of components per generic vertex attribute
gl::DOUBLE, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<f64> for f64_ {
fn from(other: f64) -> Self {
f64_::new(other)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct f64_f64 {
pub d0: f64,
pub d1: f64,
}
impl f64_f64 {
pub fn new(d0: f64, d1: f64) -> f64_f64 {
f64_f64 { d0, d1 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribLPointer(
location as gl::types::GLuint,
2, // the number of components per generic vertex attribute
gl::DOUBLE, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(f64, f64)> for f64_f64 {
fn from(other: (f64, f64)) -> Self {
f64_f64::new(other.0, other.1)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct f64_f64_f64 {
pub d0: f64,
pub d1: f64,
pub d2: f64,
}
impl f64_f64_f64 {
pub fn new(d0: f64, d1: f64, d2: f64) -> f64_f64_f64 {
f64_f64_f64 { d0, d1, d2 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribLPointer(
location as gl::types::GLuint,
3, // the number of components per generic vertex attribute
gl::DOUBLE, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(f64, f64, f64)> for f64_f64_f64 {
fn from(other: (f64, f64, f64)) -> Self {
f64_f64_f64::new(other.0, other.1, other.2)
}
}
// -----------------------------------------
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct f64_f64_f64_f64 {
pub d0: f64,
pub d1: f64,
pub d2: f64,
pub d3: f64,
}
impl f64_f64_f64_f64 {
pub fn new(d0: f64, d1: f64, d2: f64, d3: f64) -> f64_f64_f64_f64 {
f64_f64_f64_f64 { d0, d1, d2, d3 }
}
pub unsafe fn vertex_attrib_pointer(
gl: &gl::Gl,
stride: usize,
location: usize,
offset: usize,
) {
gl.EnableVertexAttribArray(location as gl::types::GLuint);
gl.VertexAttribLPointer(
location as gl::types::GLuint,
4, // the number of components per generic vertex attribute
gl::DOUBLE, // data type
stride as gl::types::GLint,
offset as *const gl::types::GLvoid,
);
}
}
impl From<(f64, f64, f64, f64)> for f64_f64_f64_f64 {
fn from(other: (f64, f64, f64, f64)) -> Self {
f64_f64_f64_f64::new(other.0, other.1, other.2, other.3)
}
}
| 26.350399 | 89 | 0.505579 |
ab41d8faaa5e8e9a244cd297135a9aa7ea9467de | 3,257 | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#![cfg(test)]
use {
blobfs_ramdisk::BlobfsRamdisk,
fuchsia_async as fasync,
fuchsia_pkg_testing::{PackageBuilder, SystemImageBuilder},
fuchsia_zircon::Status,
pkgfs_ramdisk::PkgfsRamdisk,
std::convert::{TryFrom as _, TryInto as _},
};
fn make_file_contents(size: usize) -> impl Iterator<Item = u8> {
b"ABCD".iter().copied().cycle().take(size)
}
// zero is a multiple
fn round_up_to_4096_multiple(val: usize) -> usize {
(val + 4095) & !4095
}
// meta far file VMOs are zero-padded to the smallest multiple of 4096
fn validate_vmo_contents(file_size: usize, vmo_contents: &[u8]) {
let vmo_size = round_up_to_4096_multiple(file_size);
assert!(
make_file_contents(file_size)
.chain(std::iter::repeat(b'\0'))
.take(vmo_size)
.eq(vmo_contents.iter().copied()),
"vmo content mismatch for file size {}",
file_size
);
}
#[fasync::run_singlethreaded(test)]
async fn meta_far_file() {
    let file_sizes = [0, 1, 4095, 4096, 4097];

    let mut base_pkg_with_meta_files = PackageBuilder::new("example");
    for size in &file_sizes {
        base_pkg_with_meta_files = base_pkg_with_meta_files.add_resource_at(
            format!("meta/{}", size),
            make_file_contents(*size).collect::<Vec<u8>>().as_slice(),
        );
    }
    let base_pkg_with_meta_files = base_pkg_with_meta_files.build().await.expect("build package");

    let system_image =
        SystemImageBuilder::new().static_packages(&[&base_pkg_with_meta_files]).build().await;

    let blobfs = BlobfsRamdisk::start().unwrap();
    system_image.write_to_blobfs_dir(&blobfs.root_dir().unwrap());
    base_pkg_with_meta_files.write_to_blobfs_dir(&blobfs.root_dir().unwrap());

    let pkgfs = PkgfsRamdisk::builder()
        .blobfs(blobfs)
        .system_image_merkle(system_image.meta_far_merkle_root())
        .start()
        .unwrap();
    let d = pkgfs.root_dir_proxy().expect("getting pkgfs root dir");

    for size in &file_sizes {
        let meta_far_file = io_util::directory::open_file(
            &d,
            &format!("versions/{}/meta/{}", base_pkg_with_meta_files.meta_far_merkle_root(), size),
            io_util::OPEN_RIGHT_READABLE,
        )
        .await
        .unwrap();
        let vmo = meta_far_file
            .get_backing_memory(
                fidl_fuchsia_io::VmoFlags::READ | fidl_fuchsia_io::VmoFlags::PRIVATE_CLONE,
            )
            .await
            .unwrap()
            .map_err(Status::from_raw)
            .unwrap();
        assert_eq!(usize::try_from(vmo.get_content_size().unwrap()).unwrap(), *size);
        let vmo_size = vmo.get_size().unwrap().try_into().unwrap();
        let mut actual_contents = vec![0u8; vmo_size];
        let () = vmo.read(actual_contents.as_mut_slice(), 0).unwrap();
        validate_vmo_contents(*size, &actual_contents);
    }

    // Drop the directory before we shut down the server that's serving it.
    // In practice, this probably doesn't make a difference.
    drop(d);
    pkgfs.stop().await.expect("stopping pkgfs");
}
| 36.188889 | 99 | 0.647835 |
26a68ecf9211934b1f9a3b6353063030fd251abc | 2,719 | // Copyright (c) 2018-2020 MobileCoin Inc.
use core::convert::TryFrom;
use displaydoc::Display;
use mc_attest_core::{
    QuoteSignType, ReportData, ReportDataMask, VerificationReport, VerificationReportData,
    Verifier, VerifierError, VerifyError,
};
use mc_crypto_keys::{KeyError, RistrettoPublic};
use mc_util_encodings::Error as EncodingError;
/// A structure that can validate ingest enclave reports and measurements at runtime.
///
/// This is expected to take the verification report and produce the IAS-validated and
/// decompressed RistrettoPublic key.
#[derive(Clone, Debug)]
pub struct IngestReportVerifier {
    verifier: Verifier,
}
impl IngestReportVerifier {
    /// Validate a remote ingest IAS report, and extract the pubkey from the report data
    /// bytes. The details of this are tied to the layout of the "identity" object in the
    /// ingest enclave impl.
    pub fn validate_ingest_ias_report(
        &self,
        remote_report: VerificationReport,
    ) -> Result<RistrettoPublic, Error> {
        let verification_report_data = VerificationReportData::try_from(&remote_report)?;
        let report_data: ReportData = verification_report_data.quote.report_body()?.report_data();
        let report_data_bytes: &[u8] = report_data.as_ref();

        // Extract the pubkey from the signed evidence: bytes 32..64 of the report data.
        let report_pubkey: RistrettoPublic = RistrettoPublic::try_from(&report_data_bytes[32..64])?;

        let masked_report_data = ReportDataMask::new_with_mask(report_data_bytes, &[0u8; 32])?;

        let mut verifier = self.verifier.clone();
        verifier
            .sign_type(QuoteSignType::Linkable)
            .report_data(&masked_report_data);

        verifier.verify(&remote_report)?;
        Ok(report_pubkey)
    }
}
}
impl From<&Verifier> for IngestReportVerifier {
    fn from(src: &Verifier) -> Self {
        Self {
            verifier: src.clone(),
        }
    }
}
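// Editorial usage sketch (not part of the original file): a caller that
// already holds a configured `Verifier` and a report fetched from IAS.
// How the `Verifier` is configured is out of scope here.
pub fn extract_ingest_pubkey(
    verifier: &Verifier,
    remote_report: VerificationReport,
) -> Result<RistrettoPublic, Error> {
    IngestReportVerifier::from(verifier).validate_ingest_ias_report(remote_report)
}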
/// An error that can occur when validating an ingest report
#[derive(Clone, Debug, Display, PartialEq)]
pub enum Error {
    /// Encoding Error: {0}
    Encoding(EncodingError),
    /// Key Error: {0}
    Key(KeyError),
    /// Verification failed: {0}
    VerificationParse(VerifyError),
    /// Verifier error: {0}
    Verifier(VerifierError),
}
impl From<EncodingError> for Error {
    fn from(src: EncodingError) -> Self {
        Self::Encoding(src)
    }
}

impl From<VerifyError> for Error {
    fn from(src: VerifyError) -> Self {
        Self::VerificationParse(src)
    }
}

impl From<VerifierError> for Error {
    fn from(src: VerifierError) -> Self {
        Self::Verifier(src)
    }
}

impl From<KeyError> for Error {
    fn from(src: KeyError) -> Self {
        Self::Key(src)
    }
}
| 29.879121 | 120 | 0.681501 |
0865ca7ea7df064fc9828d85ee35db9f2c143cce | 2,508 | use super::super::*;
// Test target self-consistency and JSON encoding/decoding roundtrip.
pub(super) fn test_target(target: Target) {
    target.check_consistency();
    assert_eq!(Target::from_json(target.to_json()).map(|(j, _)| j), Ok(target));
}
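// Editorial sketch (not in the original file): one concrete instance of the
// pattern above. The per-target test harness is generated by a macro; a
// single expansion looks roughly like this, with the module path being an
// assumption.
//
// #[test]
// fn x86_64_unknown_linux_gnu() {
//     test_target(crate::spec::x86_64_unknown_linux_gnu::target());
// }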
impl Target {
    fn check_consistency(&self) {
        assert_eq!(self.is_like_osx, self.vendor == "apple");
        assert_eq!(self.is_like_solaris, self.os == "solaris" || self.os == "illumos");
        assert_eq!(self.is_like_windows, self.os == "windows" || self.os == "uefi");
        assert_eq!(self.is_like_wasm, self.arch == "wasm32" || self.arch == "wasm64");
        assert!(self.is_like_windows || !self.is_like_msvc);

        // Check that LLD with the given flavor is treated identically to the linker it emulates.
        // If your target really needs to deviate from the rules below, exempt it and document
        // the reasons.
        assert_eq!(
            self.linker_flavor == LinkerFlavor::Msvc
                || self.linker_flavor == LinkerFlavor::Lld(LldFlavor::Link),
            self.lld_flavor == LldFlavor::Link,
        );
        assert_eq!(self.is_like_msvc, self.lld_flavor == LldFlavor::Link);
        for args in &[
            &self.pre_link_args,
            &self.late_link_args,
            &self.late_link_args_dynamic,
            &self.late_link_args_static,
            &self.post_link_args,
        ] {
            assert_eq!(
                args.get(&LinkerFlavor::Msvc),
                args.get(&LinkerFlavor::Lld(LldFlavor::Link)),
            );
            if args.contains_key(&LinkerFlavor::Msvc) {
                assert_eq!(self.lld_flavor, LldFlavor::Link);
            }
        }

        assert!(
            (self.pre_link_objects_fallback.is_empty()
                && self.post_link_objects_fallback.is_empty())
                || self.crt_objects_fallback.is_some()
        );

        // Keep the default "unknown" vendor instead.
        assert_ne!(self.vendor, "");
        if !self.can_use_os_unknown() {
            // Keep the default "none" for bare metal targets instead.
            assert_ne!(self.os, "unknown");
        }
    }

    // Add your target to the whitelist if it has a `std` library and you certainly want
    // "unknown" for the OS name.
    fn can_use_os_unknown(&self) -> bool {
        self.llvm_target == "wasm32-unknown-unknown"
            || self.llvm_target == "wasm64-unknown-unknown"
            || (self.env == "sgx" && self.vendor == "fortanix")
    }
}
| 40.451613 | 98 | 0.588517 |