Column summary:

| Column | Type | Min | Max |
| --- | --- | --- | --- |
| `hexsha` | string (length) | 40 | 40 |
| `size` | int64 | 4 | 1.05M |
| `content` | string (length) | 4 | 1.05M |
| `avg_line_length` | float64 | 1.33 | 100 |
| `max_line_length` | int64 | 1 | 1k |
| `alphanum_fraction` | float64 | 0.25 | 1 |
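The four numeric columns are simple per-file statistics. A minimal sketch of how such values could be computed (the dataset's exact definitions, e.g. whether trailing newlines count, are assumptions; `file_stats` is a hypothetical helper):

```rust
// Hypothetical recomputation of the per-row statistics; the exact
// definitions used by the dataset are assumptions.
fn file_stats(content: &str) -> (usize, f64, usize, f64) {
    let size = content.len(); // bytes, assuming UTF-8 input
    let line_lens: Vec<usize> = content.lines().map(str::len).collect();
    let max_line_length = line_lens.iter().copied().max().unwrap_or(0);
    let avg_line_length =
        line_lens.iter().sum::<usize>() as f64 / line_lens.len().max(1) as f64;
    let alphanum = content.chars().filter(|c| c.is_alphanumeric()).count();
    let alphanum_fraction = alphanum as f64 / content.chars().count().max(1) as f64;
    (size, avg_line_length, max_line_length, alphanum_fraction)
}

fn main() {
    let (size, avg, max, frac) = file_stats("fn main() {}\n");
    println!("size={size} avg={avg:.2} max={max} alphanum={frac:.2}");
}
```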
---

**Row 1** · hexsha: `227864d77e985f3fec81cd8931698b89c288d600` · size: 11,218 bytes

```rust
use std::ffi::OsString;
use std::fs::File;
use std::io::{BufRead, BufReader, BufWriter, Seek};
use std::path::Path;
use std::u32;

#[cfg(feature = "bmp")]
use crate::bmp;
#[cfg(feature = "gif")]
use crate::gif;
#[cfg(feature = "hdr")]
use crate::hdr;
#[cfg(feature = "ico")]
use crate::ico;
#[cfg(feature = "jpeg")]
use crate::jpeg;
#[cfg(feature = "png")]
use crate::png;
#[cfg(feature = "pnm")]
use crate::pnm;
#[cfg(feature = "tga")]
use crate::tga;
#[cfg(feature = "dds")]
use crate::dds;
#[cfg(feature = "tiff")]
use crate::tiff;
#[cfg(feature = "webp")]
use crate::webp;

use crate::color;
use crate::image;
use crate::dynimage::DynamicImage;
use crate::error::{ImageError, ImageFormatHint, ImageResult};
use crate::image::{ImageDecoder, ImageEncoder, ImageFormat};

/// Internal error type for guessing format from path.
pub(crate) enum PathError {
    /// The extension did not fit a supported format.
    UnknownExtension(OsString),
    /// Extension could not be converted to `str`.
    NoExtension,
}

pub(crate) fn open_impl(path: &Path) -> ImageResult<DynamicImage> {
    let fin = match File::open(path) {
        Ok(f) => f,
        Err(err) => return Err(ImageError::IoError(err)),
    };
    let fin = BufReader::new(fin);

    load(fin, ImageFormat::from_path(path)?)
}

/// Create a new image from a Reader.
///
/// Try [`io::Reader`] for more advanced uses.
///
/// [`io::Reader`]: io/struct.Reader.html
pub fn load<R: BufRead + Seek>(r: R, format: ImageFormat) -> ImageResult<DynamicImage> {
    #[allow(deprecated, unreachable_patterns)]
    // Default is unreachable if all features are supported.
    match format {
        #[cfg(feature = "png")]
        image::ImageFormat::Png => DynamicImage::from_decoder(png::PngDecoder::new(r)?),
        #[cfg(feature = "gif")]
        image::ImageFormat::Gif => DynamicImage::from_decoder(gif::GifDecoder::new(r)?),
        #[cfg(feature = "jpeg")]
        image::ImageFormat::Jpeg => DynamicImage::from_decoder(jpeg::JpegDecoder::new(r)?),
        #[cfg(feature = "webp")]
        image::ImageFormat::WebP => DynamicImage::from_decoder(webp::WebPDecoder::new(r)?),
        #[cfg(feature = "tiff")]
        image::ImageFormat::Tiff => DynamicImage::from_decoder(tiff::TiffDecoder::new(r)?),
        #[cfg(feature = "tga")]
        image::ImageFormat::Tga => DynamicImage::from_decoder(tga::TgaDecoder::new(r)?),
        #[cfg(feature = "dds")]
        image::ImageFormat::Dds => DynamicImage::from_decoder(dds::DdsDecoder::new(r)?),
        #[cfg(feature = "bmp")]
        image::ImageFormat::Bmp => DynamicImage::from_decoder(bmp::BmpDecoder::new(r)?),
        #[cfg(feature = "ico")]
        image::ImageFormat::Ico => DynamicImage::from_decoder(ico::IcoDecoder::new(r)?),
        #[cfg(feature = "hdr")]
        image::ImageFormat::Hdr => {
            DynamicImage::from_decoder(hdr::HDRAdapter::new(BufReader::new(r))?)
        }
        #[cfg(feature = "pnm")]
        image::ImageFormat::Pnm => {
            DynamicImage::from_decoder(pnm::PnmDecoder::new(BufReader::new(r))?)
        }
        _ => Err(ImageError::Unsupported(ImageFormatHint::Exact(format).into())),
    }
}
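// Example (an illustrative sketch, not part of the original file):
// `Cursor<Vec<u8>>` satisfies the `BufRead + Seek` bounds required by `load`,
// so an in-memory buffer can be decoded directly:
//
//     use std::io::Cursor;
//     let bytes = std::fs::read("img.png")?; // hypothetical path
//     let image = load(Cursor::new(bytes), ImageFormat::Png)?;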
pub(crate) fn image_dimensions_impl(path: &Path) -> ImageResult<(u32, u32)> {
    let format = image::ImageFormat::from_path(path)?;
    let fin = File::open(path)?;
    let fin = BufReader::new(fin);

    image_dimensions_with_format_impl(fin, format)
}

pub(crate) fn image_dimensions_with_format_impl<R: BufRead + Seek>(
    fin: R,
    format: ImageFormat,
) -> ImageResult<(u32, u32)> {
    #[allow(unreachable_patterns)]
    // Default is unreachable if all features are supported.
    Ok(match format {
        #[cfg(feature = "jpeg")]
        image::ImageFormat::Jpeg => jpeg::JpegDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "png")]
        image::ImageFormat::Png => png::PngDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "gif")]
        image::ImageFormat::Gif => gif::GifDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "webp")]
        image::ImageFormat::WebP => webp::WebPDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "tiff")]
        image::ImageFormat::Tiff => tiff::TiffDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "tga")]
        image::ImageFormat::Tga => tga::TgaDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "dds")]
        image::ImageFormat::Dds => dds::DdsDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "bmp")]
        image::ImageFormat::Bmp => bmp::BmpDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "ico")]
        image::ImageFormat::Ico => ico::IcoDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "hdr")]
        image::ImageFormat::Hdr => hdr::HDRAdapter::new(fin)?.dimensions(),
        #[cfg(feature = "pnm")]
        image::ImageFormat::Pnm => pnm::PnmDecoder::new(fin)?.dimensions(),
        format => return Err(ImageError::Unsupported(ImageFormatHint::Exact(format).into())),
    })
}

pub(crate) fn save_buffer_impl(
    path: &Path,
    buf: &[u8],
    width: u32,
    height: u32,
    color: color::ColorType,
) -> ImageResult<()> {
    let fout = &mut BufWriter::new(File::create(path)?);
    let ext = path
        .extension()
        .and_then(|s| s.to_str())
        .map_or("".to_string(), |s| s.to_ascii_lowercase());

    match &*ext {
        #[cfg(feature = "gif")]
        "gif" => gif::Encoder::new(fout).encode(buf, width, height, color),
        #[cfg(feature = "ico")]
        "ico" => ico::ICOEncoder::new(fout).write_image(buf, width, height, color),
        #[cfg(feature = "jpeg")]
        "jpg" | "jpeg" => jpeg::JPEGEncoder::new(fout).write_image(buf, width, height, color),
        #[cfg(feature = "png")]
        "png" => png::PNGEncoder::new(fout).write_image(buf, width, height, color),
        #[cfg(feature = "pnm")]
        "pbm" => pnm::PNMEncoder::new(fout)
            .with_subtype(pnm::PNMSubtype::Bitmap(pnm::SampleEncoding::Binary))
            .write_image(buf, width, height, color),
        #[cfg(feature = "pnm")]
        "pgm" => pnm::PNMEncoder::new(fout)
            .with_subtype(pnm::PNMSubtype::Graymap(pnm::SampleEncoding::Binary))
            .write_image(buf, width, height, color),
        #[cfg(feature = "pnm")]
        "ppm" => pnm::PNMEncoder::new(fout)
            .with_subtype(pnm::PNMSubtype::Pixmap(pnm::SampleEncoding::Binary))
            .write_image(buf, width, height, color),
        #[cfg(feature = "pnm")]
        "pam" => pnm::PNMEncoder::new(fout).write_image(buf, width, height, color),
        #[cfg(feature = "bmp")]
        "bmp" => bmp::BMPEncoder::new(fout).write_image(buf, width, height, color),
        #[cfg(feature = "tiff")]
        "tif" | "tiff" => tiff::TiffEncoder::new(fout).write_image(buf, width, height, color),
        _ => Err(ImageError::Unsupported(ImageFormatHint::from(path).into())),
    }
}

pub(crate) fn save_buffer_with_format_impl(
    path: &Path,
    buf: &[u8],
    width: u32,
    height: u32,
    color: color::ColorType,
    format: ImageFormat,
) -> ImageResult<()> {
    let fout = &mut BufWriter::new(File::create(path)?);

    match format {
        #[cfg(feature = "gif")]
        image::ImageFormat::Gif => gif::Encoder::new(fout).encode(buf, width, height, color),
        #[cfg(feature = "ico")]
        image::ImageFormat::Ico => ico::ICOEncoder::new(fout).write_image(buf, width, height, color),
        #[cfg(feature = "jpeg")]
        image::ImageFormat::Jpeg => jpeg::JPEGEncoder::new(fout).write_image(buf, width, height, color),
        #[cfg(feature = "png")]
        image::ImageFormat::Png => png::PNGEncoder::new(fout).write_image(buf, width, height, color),
        #[cfg(feature = "bmp")]
        image::ImageFormat::Bmp => bmp::BMPEncoder::new(fout).write_image(buf, width, height, color),
        #[cfg(feature = "tiff")]
        image::ImageFormat::Tiff => {
            tiff::TiffEncoder::new(fout).write_image(buf, width, height, color)
        }
        format => return Err(ImageError::Unsupported(ImageFormatHint::Exact(format).into())),
    }
}

/// Guess format from a path.
///
/// Returns `PathError::NoExtension` if the path has no extension, or
/// `PathError::UnknownExtension` containing the extension if it is not recognized or cannot be
/// converted to a `str`.
pub(crate) fn guess_format_from_path_impl(path: &Path) -> Result<ImageFormat, PathError> {
    let exact_ext = path.extension();
    let ext = exact_ext.and_then(|s| s.to_str()).map(str::to_ascii_lowercase);
    let ext = ext.as_ref().map(String::as_str);

    Ok(match ext {
        Some("jpg") | Some("jpeg") => image::ImageFormat::Jpeg,
        Some("png") => image::ImageFormat::Png,
        Some("gif") => image::ImageFormat::Gif,
        Some("webp") => image::ImageFormat::WebP,
        Some("tif") | Some("tiff") => image::ImageFormat::Tiff,
        Some("tga") => image::ImageFormat::Tga,
        Some("dds") => image::ImageFormat::Dds,
        Some("bmp") => image::ImageFormat::Bmp,
        Some("ico") => image::ImageFormat::Ico,
        Some("hdr") => image::ImageFormat::Hdr,
        Some("pbm") | Some("pam") | Some("ppm") | Some("pgm") => image::ImageFormat::Pnm,
        // The original extension is used, instead of _format
        _ => {
            return match exact_ext {
                None => Err(PathError::NoExtension),
                Some(os) => Err(PathError::UnknownExtension(os.to_owned())),
            }
        }
    })
}

static MAGIC_BYTES: [(&'static [u8], ImageFormat); 18] = [
    (b"\x89PNG\r\n\x1a\n", ImageFormat::Png),
    (&[0xff, 0xd8, 0xff], ImageFormat::Jpeg),
    (b"GIF89a", ImageFormat::Gif),
    (b"GIF87a", ImageFormat::Gif),
    (b"RIFF", ImageFormat::WebP), // TODO: better magic byte detection, see https://github.com/image-rs/image/issues/660
    (b"MM\x00*", ImageFormat::Tiff),
    (b"II*\x00", ImageFormat::Tiff),
    (b"DDS ", ImageFormat::Dds),
    (b"BM", ImageFormat::Bmp),
    (&[0, 0, 1, 0], ImageFormat::Ico),
    (b"#?RADIANCE", ImageFormat::Hdr),
    (b"P1", ImageFormat::Pnm),
    (b"P2", ImageFormat::Pnm),
    (b"P3", ImageFormat::Pnm),
    (b"P4", ImageFormat::Pnm),
    (b"P5", ImageFormat::Pnm),
    (b"P6", ImageFormat::Pnm),
    (b"P7", ImageFormat::Pnm),
];

/// Guess image format from memory block
///
/// Makes an educated guess about the image format based on the Magic Bytes at the beginning.
/// TGA is not supported by this function.
/// This is not to be trusted on the validity of the whole memory block.
pub fn guess_format(buffer: &[u8]) -> ImageResult<ImageFormat> {
    match guess_format_impl(buffer) {
        Some(format) => Ok(format),
        None => Err(ImageError::Unsupported(ImageFormatHint::Unknown.into())),
    }
}

pub(crate) fn guess_format_impl(buffer: &[u8]) -> Option<ImageFormat> {
    for &(signature, format) in &MAGIC_BYTES {
        if buffer.starts_with(signature) {
            return Some(format);
        }
    }
    None
}

impl From<PathError> for ImageError {
    fn from(path: PathError) -> Self {
        let format_hint = match path {
            PathError::NoExtension => ImageFormatHint::Unknown,
            PathError::UnknownExtension(ext) => ImageFormatHint::PathExtension(ext.into()),
        };
        ImageError::Unsupported(format_hint.into())
    }
}
```
avg_line_length: 38.682759 · max_line_length: 120 · alphanum_fraction: 0.607773
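The row above is the format-dispatch module of the `image` crate. A short usage sketch against the crate's public API of that era (`guess_format` and `load` are the public entry points shown above; the input path is hypothetical):

```rust
use image::GenericImageView; // brings `dimensions()` into scope
use std::io::Cursor;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Sniff the format from the file's magic bytes, then decode it.
    let bytes = std::fs::read("photo.jpg")?; // hypothetical input path
    let format = image::guess_format(&bytes)?; // prefix match against MAGIC_BYTES
    let img = image::load(Cursor::new(bytes), format)?;
    let (w, h) = img.dimensions();
    println!("{:?}: {}x{}", format, w, h);
    Ok(())
}
```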
---

**Row 2** · hexsha: `5dd743e8d00236b93dfd4e79600a65af700c77ce` · size: 100,034 bytes

```rust
//! The current rustc diagnostics emitter.
//!
//! An `Emitter` takes care of generating the output from a `DiagnosticBuilder` struct.
//!
//! There are various `Emitter` implementations that generate different output formats such as
//! JSON and human readable output.
//!
//! The output types are defined in `rustc_session::config::ErrorOutputType`.

use Destination::*;

use rustc_span::source_map::SourceMap;
use rustc_span::{SourceFile, Span};

use crate::snippet::{Annotation, AnnotationType, Line, MultilineAnnotation, Style, StyledString};
use crate::styled_buffer::StyledBuffer;
use crate::{
    CodeSuggestion, Diagnostic, DiagnosticArg, DiagnosticId, DiagnosticMessage, FluentBundle,
    Handler, LazyFallbackBundle, Level, MultiSpan, SubDiagnostic, SubstitutionHighlight,
    SuggestionStyle,
};

use rustc_lint_defs::pluralize;

use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::Lrc;
use rustc_error_messages::FluentArgs;
use rustc_span::hygiene::{ExpnKind, MacroKind};
use std::borrow::Cow;
use std::cmp::{max, min, Reverse};
use std::io;
use std::io::prelude::*;
use std::iter;
use std::path::Path;
use termcolor::{Ansi, BufferWriter, ColorChoice, ColorSpec, StandardStream};
use termcolor::{Buffer, Color, WriteColor};
use tracing::*;

/// Default column width, used in tests and when terminal dimensions cannot be determined.
const DEFAULT_COLUMN_WIDTH: usize = 140;

/// Describes the way the content of the `rendered` field of the json output is generated
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum HumanReadableErrorType {
    Default(ColorConfig),
    AnnotateSnippet(ColorConfig),
    Short(ColorConfig),
}

impl HumanReadableErrorType {
    /// Returns a (`short`, `color`) tuple
    pub fn unzip(self) -> (bool, ColorConfig) {
        match self {
            HumanReadableErrorType::Default(cc) => (false, cc),
            HumanReadableErrorType::Short(cc) => (true, cc),
            HumanReadableErrorType::AnnotateSnippet(cc) => (false, cc),
        }
    }

    pub fn new_emitter(
        self,
        dst: Box<dyn Write + Send>,
        source_map: Option<Lrc<SourceMap>>,
        bundle: Option<Lrc<FluentBundle>>,
        fallback_bundle: LazyFallbackBundle,
        teach: bool,
        terminal_width: Option<usize>,
        macro_backtrace: bool,
    ) -> EmitterWriter {
        let (short, color_config) = self.unzip();
        let color = color_config.suggests_using_colors();
        EmitterWriter::new(
            dst,
            source_map,
            bundle,
            fallback_bundle,
            short,
            teach,
            color,
            terminal_width,
            macro_backtrace,
        )
    }
}

#[derive(Clone, Copy, Debug)]
struct Margin {
    /// The available whitespace in the left that can be consumed when centering.
    pub whitespace_left: usize,
    /// The column of the beginning of left-most span.
    pub span_left: usize,
    /// The column of the end of right-most span.
    pub span_right: usize,
    /// The beginning of the line to be displayed.
    pub computed_left: usize,
    /// The end of the line to be displayed.
    pub computed_right: usize,
    /// The current width of the terminal. Uses value of `DEFAULT_COLUMN_WIDTH` constant by default
    /// and in tests.
    pub column_width: usize,
    /// The end column of a span label, including the span. Doesn't account for labels not in the
    /// same line as the span.
    pub label_right: usize,
}

impl Margin {
    fn new(
        whitespace_left: usize,
        span_left: usize,
        span_right: usize,
        label_right: usize,
        column_width: usize,
        max_line_len: usize,
    ) -> Self {
        // The 6 is padding to give a bit of room for `...` when displaying:
        // ```
        // error: message
        //   --> file.rs:16:58
        //    |
        // 16 | ... fn foo(self) -> Self::Bar {
        //    |                    ^^^^^^^^^
        // ```
        let mut m = Margin {
            whitespace_left: whitespace_left.saturating_sub(6),
            span_left: span_left.saturating_sub(6),
            span_right: span_right + 6,
            computed_left: 0,
            computed_right: 0,
            column_width,
            label_right: label_right + 6,
        };
        m.compute(max_line_len);
        m
    }

    fn was_cut_left(&self) -> bool {
        self.computed_left > 0
    }

    fn was_cut_right(&self, line_len: usize) -> bool {
        let right =
            if self.computed_right == self.span_right || self.computed_right == self.label_right {
                // Account for the "..." padding given above. Otherwise we end up with code lines
                // that do fit but end in "..." as if they were trimmed.
                self.computed_right - 6
            } else {
                self.computed_right
            };
        right < line_len && self.computed_left + self.column_width < line_len
    }

    fn compute(&mut self, max_line_len: usize) {
        // When there's a lot of whitespace (>20), we want to trim it as it is useless.
        self.computed_left = if self.whitespace_left > 20 {
            self.whitespace_left - 16 // We want some padding.
        } else {
            0
        };
        // We want to show as much as possible, max_line_len is the right-most boundary for the
        // relevant code.
        self.computed_right = max(max_line_len, self.computed_left);

        if self.computed_right - self.computed_left > self.column_width {
            // Trimming only whitespace isn't enough, let's get craftier.
            if self.label_right - self.whitespace_left <= self.column_width {
                // Attempt to fit the code window only trimming whitespace.
                self.computed_left = self.whitespace_left;
                self.computed_right = self.computed_left + self.column_width;
            } else if self.label_right - self.span_left <= self.column_width {
                // Attempt to fit the code window considering only the spans and labels.
                let padding_left = (self.column_width - (self.label_right - self.span_left)) / 2;
                self.computed_left = self.span_left.saturating_sub(padding_left);
                self.computed_right = self.computed_left + self.column_width;
            } else if self.span_right - self.span_left <= self.column_width {
                // Attempt to fit the code window considering the spans and labels plus padding.
                let padding_left = (self.column_width - (self.span_right - self.span_left)) / 5 * 2;
                self.computed_left = self.span_left.saturating_sub(padding_left);
                self.computed_right = self.computed_left + self.column_width;
            } else {
                // Mostly give up but still don't show the full line.
                self.computed_left = self.span_left;
                self.computed_right = self.span_right;
            }
        }
    }

    fn left(&self, line_len: usize) -> usize {
        min(self.computed_left, line_len)
    }

    fn right(&self, line_len: usize) -> usize {
        if line_len.saturating_sub(self.computed_left) <= self.column_width {
            line_len
        } else {
            min(line_len, self.computed_right)
        }
    }
}

const ANONYMIZED_LINE_NUM: &str = "LL";

/// Emitter trait for emitting errors.
pub trait Emitter {
    /// Emit a structured diagnostic.
    fn emit_diagnostic(&mut self, diag: &Diagnostic);

    /// Emit a notification that an artifact has been output.
    /// This is currently only supported for the JSON format,
    /// other formats can, and will, simply ignore it.
    fn emit_artifact_notification(&mut self, _path: &Path, _artifact_type: &str) {}

    fn emit_future_breakage_report(&mut self, _diags: Vec<Diagnostic>) {}

    /// Emit list of unused externs
    fn emit_unused_externs(
        &mut self,
        _lint_level: rustc_lint_defs::Level,
        _unused_externs: &[&str],
    ) {
    }

    /// Checks if should show explanations about "rustc --explain"
    fn should_show_explain(&self) -> bool {
        true
    }

    /// Checks if we can use colors in the current output stream.
    fn supports_color(&self) -> bool {
        false
    }

    fn source_map(&self) -> Option<&Lrc<SourceMap>>;

    /// Return `FluentBundle` with localized diagnostics for the locale requested by the user. If no
    /// language was requested by the user then this will be `None` and `fallback_fluent_bundle`
    /// should be used.
    fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>>;

    /// Return `FluentBundle` with localized diagnostics for the default locale of the compiler.
    /// Used when the user has not requested a specific language or when a localized diagnostic is
    /// unavailable for the requested locale.
    fn fallback_fluent_bundle(&self) -> &FluentBundle;

    /// Convert diagnostic arguments (a rustc internal type that exists to implement
    /// `Encodable`/`Decodable`) into `FluentArgs` which is necessary to perform translation.
    ///
    /// Typically performed once for each diagnostic at the start of `emit_diagnostic` and then
    /// passed around as a reference thereafter.
    fn to_fluent_args<'arg>(&self, args: &[DiagnosticArg<'arg>]) -> FluentArgs<'arg> {
        FromIterator::from_iter(args.to_vec().drain(..))
    }

    /// Convert `DiagnosticMessage`s to a string, performing translation if necessary.
    fn translate_messages(
        &self,
        messages: &[(DiagnosticMessage, Style)],
        args: &FluentArgs<'_>,
    ) -> Cow<'_, str> {
        Cow::Owned(
            messages.iter().map(|(m, _)| self.translate_message(m, args)).collect::<String>(),
        )
    }

    /// Convert a `DiagnosticMessage` to a string, performing translation if necessary.
    fn translate_message<'a>(
        &'a self,
        message: &'a DiagnosticMessage,
        args: &'a FluentArgs<'_>,
    ) -> Cow<'_, str> {
        trace!(?message, ?args);
        let (identifier, attr) = match message {
            DiagnosticMessage::Str(msg) => return Cow::Borrowed(&msg),
            DiagnosticMessage::FluentIdentifier(identifier, attr) => (identifier, attr),
        };
        let bundle = match self.fluent_bundle() {
            Some(bundle) if bundle.has_message(&identifier) => bundle,
            _ => self.fallback_fluent_bundle(),
        };
        let message = bundle.get_message(&identifier).expect("missing diagnostic in fluent bundle");
        let value = match attr {
            Some(attr) => {
                message.get_attribute(attr).expect("missing attribute in fluent message").value()
            }
            None => message.value().expect("missing value in fluent message"),
        };

        let mut err = vec![];
        let translated = bundle.format_pattern(value, Some(&args), &mut err);
        trace!(?translated, ?err);
        debug_assert!(
            err.is_empty(),
            "identifier: {:?}, args: {:?}, errors: {:?}",
            identifier,
            args,
            err
        );
        translated
    }

    /// Formats the substitutions of the primary_span
    ///
    /// There are a lot of conditions to this method, but in short:
    ///
    /// * If the current `Diagnostic` has only one visible `CodeSuggestion`,
    ///   we format the `help` suggestion depending on the content of the
    ///   substitutions. In that case, we return the modified span only.
    ///
    /// * If the current `Diagnostic` has multiple suggestions,
    ///   we return the original `primary_span` and the original suggestions.
    fn primary_span_formatted<'a>(
        &mut self,
        diag: &'a Diagnostic,
        fluent_args: &FluentArgs<'_>,
    ) -> (MultiSpan, &'a [CodeSuggestion]) {
        let mut primary_span = diag.span.clone();
        let suggestions = diag.suggestions.as_ref().map_or(&[][..], |suggestions| &suggestions[..]);
        if let Some((sugg, rest)) = suggestions.split_first() {
            let msg = self.translate_message(&sugg.msg, fluent_args);
            if rest.is_empty() &&
               // ^ if there is only one suggestion
               // don't display multi-suggestions as labels
               sugg.substitutions.len() == 1 &&
               // don't display multipart suggestions as labels
               sugg.substitutions[0].parts.len() == 1 &&
               // don't display long messages as labels
               msg.split_whitespace().count() < 10 &&
               // don't display multiline suggestions as labels
               !sugg.substitutions[0].parts[0].snippet.contains('\n') &&
               ![
                    // when this style is set we want the suggestion to be a message, not inline
                    SuggestionStyle::HideCodeAlways,
                    // trivial suggestion for tooling's sake, never shown
                    SuggestionStyle::CompletelyHidden,
                    // subtle suggestion, never shown inline
                    SuggestionStyle::ShowAlways,
               ].contains(&sugg.style)
            {
                let substitution = &sugg.substitutions[0].parts[0].snippet.trim();
                let msg = if substitution.is_empty() || sugg.style.hide_inline() {
                    // This substitution is only removal OR we explicitly don't want to show the
                    // code inline (`hide_inline`). Therefore, we don't show the substitution.
                    format!("help: {}", &msg)
                } else {
                    // Show the default suggestion text with the substitution
                    format!(
                        "help: {}{}: `{}`",
                        &msg,
                        if self
                            .source_map()
                            .map(|sm| is_case_difference(
                                &**sm,
                                substitution,
                                sugg.substitutions[0].parts[0].span,
                            ))
                            .unwrap_or(false)
                        {
                            " (notice the capitalization)"
                        } else {
                            ""
                        },
                        substitution,
                    )
                };
                primary_span.push_span_label(sugg.substitutions[0].parts[0].span, msg);

                // We return only the modified primary_span
                (primary_span, &[])
            } else {
                // if there are multiple suggestions, print them all in full
                // to be consistent. We could try to figure out if we can
                // make one (or the first one) inline, but that would give
                // undue importance to a semi-random suggestion
                (primary_span, suggestions)
            }
        } else {
            (primary_span, suggestions)
        }
    }

    fn fix_multispans_in_extern_macros_and_render_macro_backtrace(
        &self,
        source_map: &Option<Lrc<SourceMap>>,
        span: &mut MultiSpan,
        children: &mut Vec<SubDiagnostic>,
        level: &Level,
        backtrace: bool,
    ) {
        // Check for spans in macros, before `fix_multispans_in_extern_macros`
        // has a chance to replace them.
        let has_macro_spans = iter::once(&*span)
            .chain(children.iter().map(|child| &child.span))
            .flat_map(|span| span.primary_spans())
            .flat_map(|sp| sp.macro_backtrace())
            .find_map(|expn_data| {
                match expn_data.kind {
                    ExpnKind::Root => None,

                    // Skip past non-macro entries, just in case there
                    // are some which do actually involve macros.
                    ExpnKind::Inlined | ExpnKind::Desugaring(..) | ExpnKind::AstPass(..) => None,

                    ExpnKind::Macro(macro_kind, name) => Some((macro_kind, name)),
                }
            });

        if !backtrace {
            self.fix_multispans_in_extern_macros(source_map, span, children);
        }

        self.render_multispans_macro_backtrace(span, children, backtrace);

        if !backtrace {
            if let Some((macro_kind, name)) = has_macro_spans {
                let descr = macro_kind.descr();

                let msg = format!(
                    "this {level} originates in the {descr} `{name}` \
                    (in Nightly builds, run with -Z macro-backtrace for more info)",
                );

                children.push(SubDiagnostic {
                    level: Level::Note,
                    message: vec![(DiagnosticMessage::Str(msg), Style::NoStyle)],
                    span: MultiSpan::new(),
                    render_span: None,
                });
            }
        }
    }

    fn render_multispans_macro_backtrace(
        &self,
        span: &mut MultiSpan,
        children: &mut Vec<SubDiagnostic>,
        backtrace: bool,
    ) {
        for span in iter::once(span).chain(children.iter_mut().map(|child| &mut child.span)) {
            self.render_multispan_macro_backtrace(span, backtrace);
        }
    }

    fn render_multispan_macro_backtrace(&self, span: &mut MultiSpan, always_backtrace: bool) {
        let mut new_labels: Vec<(Span, String)> = vec![];

        for &sp in span.primary_spans() {
            if sp.is_dummy() {
                continue;
            }

            // FIXME(eddyb) use `retain` on `macro_backtrace` to remove all the
            // entries we don't want to print, to make sure the indices being
            // printed are contiguous (or omitted if there's only one entry).
            let macro_backtrace: Vec<_> = sp.macro_backtrace().collect();
            for (i, trace) in macro_backtrace.iter().rev().enumerate() {
                if trace.def_site.is_dummy() {
                    continue;
                }

                if always_backtrace && !matches!(trace.kind, ExpnKind::Inlined) {
                    new_labels.push((
                        trace.def_site,
                        format!(
                            "in this expansion of `{}`{}",
                            trace.kind.descr(),
                            if macro_backtrace.len() > 1 {
                                // if macro_backtrace.len() == 1 it'll be
                                // pointed at by "in this macro invocation"
                                format!(" (#{})", i + 1)
                            } else {
                                String::new()
                            },
                        ),
                    ));
                }

                // Don't add a label on the call site if the diagnostic itself
                // already points to (a part of) that call site, as the label
                // is meant for showing the relevant invocation when the actual
                // diagnostic is pointing to some part of macro definition.
                //
                // This also handles the case where an external span got replaced
                // with the call site span by `fix_multispans_in_extern_macros`.
                //
                // NB: `-Zmacro-backtrace` overrides this, for uniformity, as the
                // "in this expansion of" label above is always added in that mode,
                // and it needs an "in this macro invocation" label to match that.
                let redundant_span = trace.call_site.contains(sp);

                if !redundant_span || always_backtrace {
                    let msg: Cow<'static, _> = match trace.kind {
                        ExpnKind::Macro(MacroKind::Attr, _) => {
                            "this procedural macro expansion".into()
                        }
                        ExpnKind::Macro(MacroKind::Derive, _) => {
                            "this derive macro expansion".into()
                        }
                        ExpnKind::Macro(MacroKind::Bang, _) => "this macro invocation".into(),
                        ExpnKind::Inlined => "this inlined function call".into(),
                        ExpnKind::Root => "the crate root".into(),
                        ExpnKind::AstPass(kind) => kind.descr().into(),
                        ExpnKind::Desugaring(kind) => {
                            format!("this {} desugaring", kind.descr()).into()
                        }
                    };
                    new_labels.push((
                        trace.call_site,
                        format!(
                            "in {}{}",
                            msg,
                            if macro_backtrace.len() > 1 && always_backtrace {
                                // only specify order when the macro
                                // backtrace is multiple levels deep
                                format!(" (#{})", i + 1)
                            } else {
                                String::new()
                            },
                        ),
                    ));
                }
                if !always_backtrace {
                    break;
                }
            }
        }

        for (label_span, label_text) in new_labels {
            span.push_span_label(label_span, label_text);
        }
    }

    // This does a small "fix" for multispans by looking to see if it can find any that
    // point directly at external macros. Since these are often difficult to read,
    // this will change the span to point at the use site.
    fn fix_multispans_in_extern_macros(
        &self,
        source_map: &Option<Lrc<SourceMap>>,
        span: &mut MultiSpan,
        children: &mut Vec<SubDiagnostic>,
    ) {
        let Some(source_map) = source_map else { return };
        debug!("fix_multispans_in_extern_macros: before: span={:?} children={:?}", span, children);
        self.fix_multispan_in_extern_macros(source_map, span);
        for child in children.iter_mut() {
            self.fix_multispan_in_extern_macros(source_map, &mut child.span);
        }
        debug!("fix_multispans_in_extern_macros: after: span={:?} children={:?}", span, children);
    }

    // This "fixes" MultiSpans that contain `Span`s pointing to locations inside of external macros.
    // Since these locations are often difficult to read,
    // we move these spans from the external macros to their corresponding use site.
    fn fix_multispan_in_extern_macros(&self, source_map: &Lrc<SourceMap>, span: &mut MultiSpan) {
        // First, find all the spans in external macros and point instead at their use site.
        let replacements: Vec<(Span, Span)> = span
            .primary_spans()
            .iter()
            .copied()
            .chain(span.span_labels().iter().map(|sp_label| sp_label.span))
            .filter_map(|sp| {
                if !sp.is_dummy() && source_map.is_imported(sp) {
                    let maybe_callsite = sp.source_callsite();
                    if sp != maybe_callsite {
                        return Some((sp, maybe_callsite));
                    }
                }
                None
            })
            .collect();

        // After we have them, make sure we replace these 'bad' def sites with their use sites.
        for (from, to) in replacements {
            span.replace(from, to);
        }
    }
}

impl Emitter for EmitterWriter {
    fn source_map(&self) -> Option<&Lrc<SourceMap>> {
        self.sm.as_ref()
    }

    fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>> {
        self.fluent_bundle.as_ref()
    }

    fn fallback_fluent_bundle(&self) -> &FluentBundle {
        &**self.fallback_bundle
    }

    fn emit_diagnostic(&mut self, diag: &Diagnostic) {
        let fluent_args = self.to_fluent_args(diag.args());

        let mut children = diag.children.clone();
        let (mut primary_span, suggestions) = self.primary_span_formatted(&diag, &fluent_args);
        debug!("emit_diagnostic: suggestions={:?}", suggestions);

        self.fix_multispans_in_extern_macros_and_render_macro_backtrace(
            &self.sm,
            &mut primary_span,
            &mut children,
            &diag.level,
            self.macro_backtrace,
        );

        self.emit_messages_default(
            &diag.level,
            &diag.message,
            &fluent_args,
            &diag.code,
            &primary_span,
            &children,
            &suggestions,
        );
    }

    fn should_show_explain(&self) -> bool {
        !self.short_message
    }

    fn supports_color(&self) -> bool {
        self.dst.supports_color()
    }
}

/// An emitter that does nothing when emitting a non-fatal diagnostic.
/// Fatal diagnostics are forwarded to `fatal_handler` to avoid silent
/// failures of rustc, as witnessed e.g. in issue #89358.
pub struct SilentEmitter {
    pub fatal_handler: Handler,
    pub fatal_note: Option<String>,
}

impl Emitter for SilentEmitter {
    fn source_map(&self) -> Option<&Lrc<SourceMap>> {
        None
    }

    fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>> {
        None
    }

    fn fallback_fluent_bundle(&self) -> &FluentBundle {
        panic!("silent emitter attempted to translate message")
    }

    fn emit_diagnostic(&mut self, d: &Diagnostic) {
        if d.level == Level::Fatal {
            let mut d = d.clone();
            if let Some(ref note) = self.fatal_note {
                d.note(note);
            }
            self.fatal_handler.emit_diagnostic(&mut d);
        }
    }
}

/// Maximum number of lines we will print for a multiline suggestion; arbitrary.
///
/// This should be replaced with a more involved mechanism to output multiline suggestions that
/// more closely mimics the regular diagnostic output, where irrelevant code lines are elided.
pub const MAX_SUGGESTION_HIGHLIGHT_LINES: usize = 6;

/// Maximum number of suggestions to be shown
///
/// Arbitrary, but taken from trait import suggestion limit
pub const MAX_SUGGESTIONS: usize = 4;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ColorConfig {
    Auto,
    Always,
    Never,
}

impl ColorConfig {
    fn to_color_choice(self) -> ColorChoice {
        match self {
            ColorConfig::Always => {
                if atty::is(atty::Stream::Stderr) {
                    ColorChoice::Always
                } else {
                    ColorChoice::AlwaysAnsi
                }
            }
            ColorConfig::Never => ColorChoice::Never,
            ColorConfig::Auto if atty::is(atty::Stream::Stderr) => ColorChoice::Auto,
            ColorConfig::Auto => ColorChoice::Never,
        }
    }
    fn suggests_using_colors(self) -> bool {
        match self {
            ColorConfig::Always | ColorConfig::Auto => true,
            ColorConfig::Never => false,
        }
    }
}

/// Handles the writing of `HumanReadableErrorType::Default` and `HumanReadableErrorType::Short`
pub struct EmitterWriter {
    dst: Destination,
    sm: Option<Lrc<SourceMap>>,
    fluent_bundle: Option<Lrc<FluentBundle>>,
    fallback_bundle: LazyFallbackBundle,
    short_message: bool,
    teach: bool,
    ui_testing: bool,
    terminal_width: Option<usize>,

    macro_backtrace: bool,
}

#[derive(Debug)]
pub struct FileWithAnnotatedLines {
    pub file: Lrc<SourceFile>,
    pub lines: Vec<Line>,
    multiline_depth: usize,
}

impl EmitterWriter {
    pub fn stderr(
        color_config: ColorConfig,
        source_map: Option<Lrc<SourceMap>>,
        fluent_bundle: Option<Lrc<FluentBundle>>,
        fallback_bundle: LazyFallbackBundle,
        short_message: bool,
        teach: bool,
        terminal_width: Option<usize>,
        macro_backtrace: bool,
    ) -> EmitterWriter {
        let dst = Destination::from_stderr(color_config);
        EmitterWriter {
            dst,
            sm: source_map,
            fluent_bundle,
            fallback_bundle,
            short_message,
            teach,
            ui_testing: false,
            terminal_width,
            macro_backtrace,
        }
    }

    pub fn new(
        dst: Box<dyn Write + Send>,
        source_map: Option<Lrc<SourceMap>>,
        fluent_bundle: Option<Lrc<FluentBundle>>,
        fallback_bundle: LazyFallbackBundle,
        short_message: bool,
        teach: bool,
        colored: bool,
        terminal_width: Option<usize>,
        macro_backtrace: bool,
    ) -> EmitterWriter {
        EmitterWriter {
            dst: Raw(dst, colored),
            sm: source_map,
            fluent_bundle,
            fallback_bundle,
            short_message,
            teach,
            ui_testing: false,
            terminal_width,
            macro_backtrace,
        }
    }

    pub fn ui_testing(mut self, ui_testing: bool) -> Self {
        self.ui_testing = ui_testing;
        self
    }

    fn maybe_anonymized(&self, line_num: usize) -> String {
        if self.ui_testing { ANONYMIZED_LINE_NUM.to_string() } else { line_num.to_string() }
    }

    fn draw_line(
        &self,
        buffer: &mut StyledBuffer,
        source_string: &str,
        line_index: usize,
        line_offset: usize,
        width_offset: usize,
        code_offset: usize,
        margin: Margin,
    ) {
        // Tabs are assumed to have been replaced by spaces in calling code.
        debug_assert!(!source_string.contains('\t'));
        let line_len = source_string.len();
        // Create the source line we will highlight.
        let left = margin.left(line_len);
        let right = margin.right(line_len);
        // On long lines, we strip the source line, accounting for unicode.
        let mut taken = 0;
        let code: String = source_string
            .chars()
            .skip(left)
            .take_while(|ch| {
                // Make sure that the trimming on the right will fall within the terminal width.
                // FIXME: `unicode_width` sometimes disagrees with terminals on how wide a `char`
                // is. For now, just accept that sometimes the code line will be longer than
                // desired.
                let next = unicode_width::UnicodeWidthChar::width(*ch).unwrap_or(1);
                if taken + next > right - left {
                    return false;
                }
                taken += next;
                true
            })
            .collect();
        buffer.puts(line_offset, code_offset, &code, Style::Quotation);
        if margin.was_cut_left() {
            // We have stripped some code/whitespace from the beginning, make it clear.
            buffer.puts(line_offset, code_offset, "...", Style::LineNumber);
        }
        if margin.was_cut_right(line_len) {
            // We have stripped some code after the right-most span end, make it clear we did so.
            buffer.puts(line_offset, code_offset + taken - 3, "...", Style::LineNumber);
        }
        buffer.puts(line_offset, 0, &self.maybe_anonymized(line_index), Style::LineNumber);

        draw_col_separator(buffer, line_offset, width_offset - 2);
    }

    fn render_source_line(
        &self,
        buffer: &mut StyledBuffer,
        file: Lrc<SourceFile>,
        line: &Line,
        width_offset: usize,
        code_offset: usize,
        margin: Margin,
    ) -> Vec<(usize, Style)> {
        // Draw:
        //
        //   LL | ... code ...
        //      |     ^^-^ span label
        //      |       |
        //      |       secondary span label
        //
        //   ^^ ^ ^^^ ^^^^ ^^^ we don't care about code too far to the right of a span, we trim it
        //   |  | |   |
        //   |  | |   actual code found in your source code and the spans we use to mark it
        //   |  | when there's too much wasted space to the left, trim it
        //   |  vertical divider between the column number and the code
        //   column number

        if line.line_index == 0 {
            return Vec::new();
        }

        let source_string = match file.get_line(line.line_index - 1) {
            Some(s) => normalize_whitespace(&*s),
            None => return Vec::new(),
        };

        let line_offset = buffer.num_lines();

        // Left trim
        let left = margin.left(source_string.len());

        // Account for unicode characters of width !=0 that were removed.
        let left = source_string
            .chars()
            .take(left)
            .map(|ch| unicode_width::UnicodeWidthChar::width(ch).unwrap_or(1))
            .sum();

        self.draw_line(
            buffer,
            &source_string,
            line.line_index,
            line_offset,
            width_offset,
            code_offset,
            margin,
        );

        // Special case when there's only one annotation involved, it is the start of a multiline
        // span and there's no text at the beginning of the code line. Instead of doing the whole
        // graph:
        //
        // 2 |  fn foo() {
        //   |  _^
        // 3 | |
        // 4 | | }
        //   | |_^ test
        //
        // we simplify the output to:
        //
        // 2 | / fn foo() {
        // 3 | |
        // 4 | | }
        //   | |_^ test
        if let [ann] = &line.annotations[..] {
            if let AnnotationType::MultilineStart(depth) = ann.annotation_type {
                if source_string.chars().take(ann.start_col).all(|c| c.is_whitespace()) {
                    let style = if ann.is_primary {
                        Style::UnderlinePrimary
                    } else {
                        Style::UnderlineSecondary
                    };
                    buffer.putc(line_offset, width_offset + depth - 1, '/', style);
                    return vec![(depth, style)];
                }
            }
        }

        // We want to display like this:
        //
        //      vec.push(vec.pop().unwrap());
        //      ---      ^^^               - previous borrow ends here
        //      |        |
        //      |        error occurs here
        //      previous borrow of `vec` occurs here
        //
        // But there are some weird edge cases to be aware of:
        //
        //      vec.push(vec.pop().unwrap());
        //      --------                    - previous borrow ends here
        //      ||
        //      |this makes no sense
        //      previous borrow of `vec` occurs here
        //
        // For this reason, we group the lines into "highlight lines"
        // and "annotations lines", where the highlight lines have the `^`.

        // Sort the annotations by (start, end col)
        // The labels are reversed, sort and then reversed again.
        // Consider a list of annotations (A1, A2, C1, C2, B1, B2) where
        // the letter signifies the span. Here we are only sorting by the
        // span and hence, the order of the elements with the same span will
        // not change. On reversing the ordering (|a, b| but b.cmp(a)), you get
        // (C1, C2, B1, B2, A1, A2).
        // All the elements with the same span are still ordered first to last,
        // but all the elements with different spans are ordered by their spans
        // in last to first order. Last to first order is important, because
        // the jiggly lines and | are on the left, so the rightmost span needs
        // to be rendered first, otherwise the lines would end up needing to go
        // over a message.
        let mut annotations = line.annotations.clone();
        annotations.sort_by_key(|a| Reverse(a.start_col));

        // First, figure out where each label will be positioned.
        //
        // In the case where you have the following annotations:
        //
        //      vec.push(vec.pop().unwrap());
        //      --------                    - previous borrow ends here [C]
        //      ||
        //      |this makes no sense [B]
        //      previous borrow of `vec` occurs here [A]
        //
        // `annotations_position` will hold [(2, A), (1, B), (0, C)].
        //
        // We try, when possible, to stick the rightmost annotation at the end
        // of the highlight line:
        //
        //      vec.push(vec.pop().unwrap());
        //      ---      ---               - previous borrow ends here
        //
        // But sometimes that's not possible because one of the other
        // annotations overlaps it. For example, from the test
        // `span_overlap_label`, we have the following annotations
        // (written on distinct lines for clarity):
        //
        //      fn foo(x: u32) {
        //      --------------
        //             -
        //
        // In this case, we can't stick the rightmost-most label on
        // the highlight line, or we would get:
        //
        //      fn foo(x: u32) {
        //      -------- x_span
        //      |
        //      fn_span
        //
        // which is totally weird. Instead we want:
        //
        //      fn foo(x: u32) {
        //      --------------
        //      |      |
        //      |      x_span
        //      fn_span
        //
        // which is...less weird, at least. In fact, in general, if
        // the rightmost span overlaps with any other span, we should
        // use the "hang below" version, so we can at least make it
        // clear where the span *starts*. There's an exception for this
        // logic, when the labels do not have a message:
        //
        //      fn foo(x: u32) {
        //      --------------
        //             |
        //             x_span
        //
        // instead of:
        //
        //      fn foo(x: u32) {
        //      --------------
        //      |      |
        //      |      x_span
        //      <EMPTY LINE>
        //
        let mut annotations_position = vec![];
        let mut line_len = 0;
        let mut p = 0;
        for (i, annotation) in annotations.iter().enumerate() {
            for (j, next) in annotations.iter().enumerate() {
                if overlaps(next, annotation, 0)  // This label overlaps with another one and both
                    && annotation.has_label()     // take space (they have text and are not
                    && j > i                      // multiline lines).
                    && p == 0
                // We're currently on the first line, move the label one line down
                {
                    // If we're overlapping with an un-labelled annotation with the same span
                    // we can just merge them in the output
                    if next.start_col == annotation.start_col
                        && next.end_col == annotation.end_col
                        && !next.has_label()
                    {
                        continue;
                    }

                    // This annotation needs a new line in the output.
                    p += 1;
                    break;
                }
            }
            annotations_position.push((p, annotation));
            for (j, next) in annotations.iter().enumerate() {
                if j > i {
                    let l = next.label.as_ref().map_or(0, |label| label.len() + 2);
                    // Do not allow two labels to be in the same line if they
                    // overlap including padding, to avoid situations like:
                    //
                    //      fn foo(x: u32) {
                    //      -------^------
                    //      |      |
                    //      fn_spanx_span
                    //
                    // Both labels must have some text, otherwise they are not
                    // overlapping. Do not add a new line if this annotation or
                    // the next are vertical line placeholders. If either this
                    // or the next annotation is multiline start/end, move it
                    // to a new line so as not to overlap the horizontal lines.
                    if (overlaps(next, annotation, l)
                        && annotation.has_label()
                        && next.has_label())
                        || (annotation.takes_space() && next.has_label())
                        || (annotation.has_label() && next.takes_space())
                        || (annotation.takes_space() && next.takes_space())
                        || (overlaps(next, annotation, l)
                            && next.end_col <= annotation.end_col
                            && next.has_label()
                            && p == 0)
                    // Avoid #42595.
                    {
                        // This annotation needs a new line in the output.
                        p += 1;
                        break;
                    }
                }
            }
            line_len = max(line_len, p);
        }

        if line_len != 0 {
            line_len += 1;
        }

        // If there are no annotations or the only annotations on this line are
        // MultilineLine, then there's only code being shown, stop processing.
        if line.annotations.iter().all(|a| a.is_line()) {
            return vec![];
        }

        // Write the column separator.
        //
        // After this we will have:
        //
        // 2 |   fn foo() {
        //   |
        //   |
        //   |
        // 3 |
        // 4 |   }
        //   |
        for pos in 0..=line_len {
            draw_col_separator(buffer, line_offset + pos + 1, width_offset - 2);
        }

        // Write the horizontal lines for multiline annotations
        // (only the first and last lines need this).
        //
        // After this we will have:
        //
        // 2 |   fn foo() {
        //   |  __________
        //   |
        //   |
        // 3 |
        // 4 |   }
        //   |  _
        for &(pos, annotation) in &annotations_position {
            let style = if annotation.is_primary {
                Style::UnderlinePrimary
            } else {
                Style::UnderlineSecondary
            };
            let pos = pos + 1;
            match annotation.annotation_type {
                AnnotationType::MultilineStart(depth) | AnnotationType::MultilineEnd(depth) => {
                    draw_range(
                        buffer,
                        '_',
                        line_offset + pos,
                        width_offset + depth,
                        (code_offset + annotation.start_col).saturating_sub(left),
                        style,
                    );
                }
                _ if self.teach => {
                    buffer.set_style_range(
                        line_offset,
                        (code_offset + annotation.start_col).saturating_sub(left),
                        (code_offset + annotation.end_col).saturating_sub(left),
                        style,
                        annotation.is_primary,
                    );
                }
                _ => {}
            }
        }

        // Write the vertical lines for labels that are on a different line as the underline.
        //
        // After this we will have:
        //
        // 2 |   fn foo() {
        //   |  __________
        //   | |    |
        //   | |
        // 3 | |
        // 4 | | }
        //   | |_
        for &(pos, annotation) in &annotations_position {
            let style = if annotation.is_primary {
                Style::UnderlinePrimary
            } else {
                Style::UnderlineSecondary
            };
            let pos = pos + 1;

            if pos > 1 && (annotation.has_label() || annotation.takes_space()) {
                for p in line_offset + 1..=line_offset + pos {
                    buffer.putc(
                        p,
                        (code_offset + annotation.start_col).saturating_sub(left),
                        '|',
                        style,
                    );
                }
            }
            match annotation.annotation_type {
                AnnotationType::MultilineStart(depth) => {
                    for p in line_offset + pos + 1..line_offset + line_len + 2 {
                        buffer.putc(p, width_offset + depth - 1, '|', style);
                    }
                }
                AnnotationType::MultilineEnd(depth) => {
                    for p in line_offset..=line_offset + pos {
                        buffer.putc(p, width_offset + depth - 1, '|', style);
                    }
                }
                _ => (),
            }
        }

        // Write the labels on the annotations that actually have a label.
        //
        // After this we will have:
        //
        // 2 |   fn foo() {
        //   |  __________
        //   |      |
        //   |      something about `foo`
        // 3 |
        // 4 |   }
        //   |  _  test
        for &(pos, annotation) in &annotations_position {
            let style =
                if annotation.is_primary { Style::LabelPrimary } else { Style::LabelSecondary };
            let (pos, col) = if pos == 0 {
                (pos + 1, (annotation.end_col + 1).saturating_sub(left))
            } else {
                (pos + 2, annotation.start_col.saturating_sub(left))
            };
            if let Some(ref label) = annotation.label {
                buffer.puts(line_offset + pos, code_offset + col, &label, style);
            }
        }

        // Sort from biggest span to smallest span so that smaller spans are
        // represented in the output:
        //
        // x | fn foo()
        //   | ^^^---^^
        //   | |  |
        //   | |  something about `foo`
        //   | something about `fn foo()`
        annotations_position.sort_by_key(|(_, ann)| {
            // Decreasing order. When annotations share the same length, prefer `Primary`.
            (Reverse(ann.len()), ann.is_primary)
        });

        // Write the underlines.
        //
        // After this we will have:
        //
        // 2 |   fn foo() {
        //   |  ____-_____^
        //   |      |
        //   |      something about `foo`
        // 3 |
        // 4 |   }
        //   |  _^  test
        for &(_, annotation) in &annotations_position {
            let (underline, style) = if annotation.is_primary {
                ('^', Style::UnderlinePrimary)
            } else {
                ('-', Style::UnderlineSecondary)
            };
            for p in annotation.start_col..annotation.end_col {
                buffer.putc(
                    line_offset + 1,
                    (code_offset + p).saturating_sub(left),
                    underline,
                    style,
                );
            }
        }
        annotations_position
            .iter()
            .filter_map(|&(_, annotation)| match annotation.annotation_type {
                AnnotationType::MultilineStart(p) | AnnotationType::MultilineEnd(p) => {
                    let style = if annotation.is_primary {
                        Style::LabelPrimary
                    } else {
                        Style::LabelSecondary
                    };
                    Some((p, style))
                }
                _ => None,
            })
            .collect::<Vec<_>>()
    }

    fn get_multispan_max_line_num(&mut self, msp: &MultiSpan) -> usize {
        let Some(ref sm) = self.sm else {
            return 0;
        };

        let mut max = 0;
        for primary_span in msp.primary_spans() {
            if !primary_span.is_dummy() {
                let hi = sm.lookup_char_pos(primary_span.hi());
                max = (hi.line).max(max);
            }
        }
        if !self.short_message {
            for span_label in msp.span_labels() {
                if !span_label.span.is_dummy() {
                    let hi = sm.lookup_char_pos(span_label.span.hi());
                    max = (hi.line).max(max);
                }
            }
        }

        max
    }

    fn get_max_line_num(&mut self, span: &MultiSpan, children: &[SubDiagnostic]) -> usize {
        let primary = self.get_multispan_max_line_num(span);
        children
            .iter()
            .map(|sub| self.get_multispan_max_line_num(&sub.span))
            .max()
            .unwrap_or(0)
            .max(primary)
    }

    /// Adds a left margin to every line but the first, given a padding length and the label being
    /// displayed, keeping the provided highlighting.
    fn msg_to_buffer(
        &self,
        buffer: &mut StyledBuffer,
        msg: &[(DiagnosticMessage, Style)],
        args: &FluentArgs<'_>,
        padding: usize,
        label: &str,
        override_style: Option<Style>,
    ) {
        // The extra 5 ` ` is padding that's always needed to align to the `note: `:
        //
        //   error: message
        //     --> file.rs:13:20
        //      |
        //   13 |     <CODE>
        //      |      ^^^^
        //      |
        //      = note: multiline
        //              message
        //   ++^^^----xx
        //    |  |   | |
        //    |  |   | magic `2`
        //    |  |   length of label
        //    |  magic `3`
        //    `max_line_num_len`
        let padding = " ".repeat(padding + label.len() + 5);

        /// Returns `override` if it is present and `style` is `NoStyle` or `style` otherwise
        fn style_or_override(style: Style, override_: Option<Style>) -> Style {
            match (style, override_) {
                (Style::NoStyle, Some(override_)) => override_,
                _ => style,
            }
        }

        let mut line_number = 0;

        // Provided the following diagnostic message:
        //
        //     let msg = vec![
        //         ("
        //         ("highlighted multiline\nstring to\nsee how it ", Style::NoStyle),
        //         ("looks", Style::Highlight),
        //         ("with\nvery ", Style::NoStyle),
        //         ("weird", Style::Highlight),
        //         (" formats\n", Style::NoStyle),
        //         ("see?", Style::Highlight),
        //     ];
        //
        // the expected output on a note is (* surround the highlighted text)
        //
        //     = note: highlighted multiline
        //             string to
        //             see how it *looks* with
        //             very *weird* formats
        //             see?
        for &(ref text, ref style) in msg.iter() {
            let text = self.translate_message(text, args);
            let lines = text.split('\n').collect::<Vec<_>>();
            if lines.len() > 1 {
                for (i, line) in lines.iter().enumerate() {
                    if i != 0 {
                        line_number += 1;
                        buffer.append(line_number, &padding, Style::NoStyle);
                    }
                    buffer.append(line_number, line, style_or_override(*style, override_style));
                }
            } else {
                buffer.append(line_number, &text, style_or_override(*style, override_style));
            }
        }
    }

    fn emit_message_default(
        &mut self,
        msp: &MultiSpan,
        msg: &[(DiagnosticMessage, Style)],
        args: &FluentArgs<'_>,
        code: &Option<DiagnosticId>,
        level: &Level,
        max_line_num_len: usize,
        is_secondary: bool,
    ) -> io::Result<()> {
        let mut buffer = StyledBuffer::new();

        if !msp.has_primary_spans() && !msp.has_span_labels() && is_secondary && !self.short_message
        {
            // This is a secondary message with no span info
            for _ in 0..max_line_num_len {
                buffer.prepend(0, " ", Style::NoStyle);
            }
            draw_note_separator(&mut buffer, 0, max_line_num_len + 1);
            if *level != Level::FailureNote {
                buffer.append(0, level.to_str(), Style::MainHeaderMsg);
                buffer.append(0, ": ", Style::NoStyle);
            }
            self.msg_to_buffer(&mut buffer, msg, args, max_line_num_len, "note", None);
        } else {
            let mut label_width = 0;
            // The failure note level itself does not provide any useful diagnostic information
            if *level != Level::FailureNote {
                buffer.append(0, level.to_str(), Style::Level(*level));
                label_width += level.to_str().len();
            }
            // only render error codes, not lint codes
            if let Some(DiagnosticId::Error(ref code)) = *code {
                buffer.append(0, "[", Style::Level(*level));
                buffer.append(0, &code, Style::Level(*level));
                buffer.append(0, "]", Style::Level(*level));
                label_width += 2 + code.len();
            }
            let header_style = if is_secondary { Style::HeaderMsg } else { Style::MainHeaderMsg };
            if *level != Level::FailureNote {
                buffer.append(0, ": ", header_style);
                label_width += 2;
            }
            for &(ref text, _) in msg.iter() {
                let text = self.translate_message(text, args);
                // Account for newlines to align output to its label.
                for (line, text) in normalize_whitespace(&text).lines().enumerate() {
                    buffer.append(
                        0 + line,
                        &format!(
                            "{}{}",
                            if line == 0 { String::new() } else { " ".repeat(label_width) },
                            text
                        ),
                        header_style,
                    );
                }
            }
        }

        let mut annotated_files = FileWithAnnotatedLines::collect_annotations(self, args, msp);

        // Make sure our primary file comes first
        let (primary_lo, sm) = if let (Some(sm), Some(ref primary_span)) =
            (self.sm.as_ref(), msp.primary_span().as_ref())
        {
            if !primary_span.is_dummy() {
                (sm.lookup_char_pos(primary_span.lo()), sm)
            } else {
                emit_to_destination(&buffer.render(), level, &mut self.dst, self.short_message)?;
                return Ok(());
            }
        } else {
            // If we don't have span information, emit and exit
            emit_to_destination(&buffer.render(), level, &mut self.dst, self.short_message)?;
            return Ok(());
        };
        if let Ok(pos) =
            annotated_files.binary_search_by(|x| x.file.name.cmp(&primary_lo.file.name))
        {
            annotated_files.swap(0, pos);
        }

        // Print out the annotate source lines that correspond with the error
        for annotated_file in annotated_files {
            // we can't annotate anything if the source is unavailable.
if !sm.ensure_source_file_source_present(annotated_file.file.clone()) { continue; } // print out the span location and spacer before we print the annotated source // to do this, we need to know if this span will be primary let is_primary = primary_lo.file.name == annotated_file.file.name; if is_primary { let loc = primary_lo.clone(); if !self.short_message { // remember where we are in the output buffer for easy reference let buffer_msg_line_offset = buffer.num_lines(); buffer.prepend(buffer_msg_line_offset, "--> ", Style::LineNumber); buffer.append( buffer_msg_line_offset, &format!( "{}:{}:{}", sm.filename_for_diagnostics(&loc.file.name), sm.doctest_offset_line(&loc.file.name, loc.line), loc.col.0 + 1, ), Style::LineAndColumn, ); for _ in 0..max_line_num_len { buffer.prepend(buffer_msg_line_offset, " ", Style::NoStyle); } } else { buffer.prepend( 0, &format!( "{}:{}:{}: ", sm.filename_for_diagnostics(&loc.file.name), sm.doctest_offset_line(&loc.file.name, loc.line), loc.col.0 + 1, ), Style::LineAndColumn, ); } } else if !self.short_message { // remember where we are in the output buffer for easy reference let buffer_msg_line_offset = buffer.num_lines(); // Add spacing line draw_col_separator_no_space( &mut buffer, buffer_msg_line_offset, max_line_num_len + 1, ); // Then, the secondary file indicator buffer.prepend(buffer_msg_line_offset + 1, "::: ", Style::LineNumber); let loc = if let Some(first_line) = annotated_file.lines.first() { let col = if let Some(first_annotation) = first_line.annotations.first() { format!(":{}", first_annotation.start_col + 1) } else { String::new() }; format!( "{}:{}{}", sm.filename_for_diagnostics(&annotated_file.file.name), sm.doctest_offset_line(&annotated_file.file.name, first_line.line_index), col ) } else { format!("{}", sm.filename_for_diagnostics(&annotated_file.file.name)) }; buffer.append(buffer_msg_line_offset + 1, &loc, Style::LineAndColumn); for _ in 0..max_line_num_len { buffer.prepend(buffer_msg_line_offset + 1, " ", Style::NoStyle); } } if !self.short_message { // Put in the spacer between the location and annotated source let buffer_msg_line_offset = buffer.num_lines(); draw_col_separator_no_space( &mut buffer, buffer_msg_line_offset, max_line_num_len + 1, ); // Contains the vertical lines' positions for active multiline annotations let mut multilines = FxHashMap::default(); // Get the left-side margin to remove it let mut whitespace_margin = usize::MAX; for line_idx in 0..annotated_file.lines.len() { let file = annotated_file.file.clone(); let line = &annotated_file.lines[line_idx]; if let Some(source_string) = file.get_line(line.line_index - 1) { let leading_whitespace = source_string .chars() .take_while(|c| c.is_whitespace()) .map(|c| { match c { // Tabs are displayed as 4 spaces '\t' => 4, _ => 1, } }) .sum(); if source_string.chars().any(|c| !c.is_whitespace()) { whitespace_margin = min(whitespace_margin, leading_whitespace); } } } if whitespace_margin == usize::MAX { whitespace_margin = 0; } // Left-most column any visible span points at. let mut span_left_margin = usize::MAX; for line in &annotated_file.lines { for ann in &line.annotations { span_left_margin = min(span_left_margin, ann.start_col); span_left_margin = min(span_left_margin, ann.end_col); } } if span_left_margin == usize::MAX { span_left_margin = 0; } // Right-most column any visible span points at. 
let mut span_right_margin = 0; let mut label_right_margin = 0; let mut max_line_len = 0; for line in &annotated_file.lines { max_line_len = max( max_line_len, annotated_file.file.get_line(line.line_index - 1).map_or(0, |s| s.len()), ); for ann in &line.annotations { span_right_margin = max(span_right_margin, ann.start_col); span_right_margin = max(span_right_margin, ann.end_col); // FIXME: account for labels not in the same line let label_right = ann.label.as_ref().map_or(0, |l| l.len() + 1); label_right_margin = max(label_right_margin, ann.end_col + label_right); } } let width_offset = 3 + max_line_num_len; let code_offset = if annotated_file.multiline_depth == 0 { width_offset } else { width_offset + annotated_file.multiline_depth + 1 }; let column_width = if let Some(width) = self.terminal_width { width.saturating_sub(code_offset) } else if self.ui_testing { DEFAULT_COLUMN_WIDTH } else { termize::dimensions() .map(|(w, _)| w.saturating_sub(code_offset)) .unwrap_or(DEFAULT_COLUMN_WIDTH) }; let margin = Margin::new( whitespace_margin, span_left_margin, span_right_margin, label_right_margin, column_width, max_line_len, ); // Next, output the annotate source for this file for line_idx in 0..annotated_file.lines.len() { let previous_buffer_line = buffer.num_lines(); let depths = self.render_source_line( &mut buffer, annotated_file.file.clone(), &annotated_file.lines[line_idx], width_offset, code_offset, margin, ); let mut to_add = FxHashMap::default(); for (depth, style) in depths { if multilines.remove(&depth).is_none() { to_add.insert(depth, style); } } // Set the multiline annotation vertical lines to the left of // the code in this line. for (depth, style) in &multilines { for line in previous_buffer_line..buffer.num_lines() { draw_multiline_line(&mut buffer, line, width_offset, *depth, *style); } } // check to see if we need to print out or elide lines that come between // this annotated line and the next one. if line_idx < (annotated_file.lines.len() - 1) { let line_idx_delta = annotated_file.lines[line_idx + 1].line_index - annotated_file.lines[line_idx].line_index; if line_idx_delta > 2 { let last_buffer_line_num = buffer.num_lines(); buffer.puts(last_buffer_line_num, 0, "...", Style::LineNumber); // Set the multiline annotation vertical lines on `...` bridging line. 
for (depth, style) in &multilines { draw_multiline_line( &mut buffer, last_buffer_line_num, width_offset, *depth, *style, ); } } else if line_idx_delta == 2 { let unannotated_line = annotated_file .file .get_line(annotated_file.lines[line_idx].line_index) .unwrap_or_else(|| Cow::from("")); let last_buffer_line_num = buffer.num_lines(); self.draw_line( &mut buffer, &normalize_whitespace(&unannotated_line), annotated_file.lines[line_idx + 1].line_index - 1, last_buffer_line_num, width_offset, code_offset, margin, ); for (depth, style) in &multilines { draw_multiline_line( &mut buffer, last_buffer_line_num, width_offset, *depth, *style, ); } } } multilines.extend(&to_add); } } } // final step: take our styled buffer, render it, then output it emit_to_destination(&buffer.render(), level, &mut self.dst, self.short_message)?; Ok(()) } fn emit_suggestion_default( &mut self, suggestion: &CodeSuggestion, args: &FluentArgs<'_>, level: &Level, max_line_num_len: usize, ) -> io::Result<()> { let Some(ref sm) = self.sm else { return Ok(()); }; // Render the replacements for each suggestion let suggestions = suggestion.splice_lines(&**sm); debug!("emit_suggestion_default: suggestions={:?}", suggestions); if suggestions.is_empty() { // Suggestions coming from macros can have malformed spans. This is a heavy handed // approach to avoid ICEs by ignoring the suggestion outright. return Ok(()); } let mut buffer = StyledBuffer::new(); // Render the suggestion message buffer.append(0, level.to_str(), Style::Level(*level)); buffer.append(0, ": ", Style::HeaderMsg); self.msg_to_buffer( &mut buffer, &[(suggestion.msg.to_owned(), Style::NoStyle)], args, max_line_num_len, "suggestion", Some(Style::HeaderMsg), ); let mut row_num = 2; draw_col_separator_no_space(&mut buffer, 1, max_line_num_len + 1); let mut notice_capitalization = false; for (complete, parts, highlights, only_capitalization) in suggestions.iter().take(MAX_SUGGESTIONS) { notice_capitalization |= only_capitalization; let has_deletion = parts.iter().any(|p| p.is_deletion()); let is_multiline = complete.lines().count() > 1; enum DisplaySuggestion { Underline, Diff, None, } let show_code_change = if has_deletion && !is_multiline { DisplaySuggestion::Diff } else if (parts.len() != 1 || parts[0].snippet.trim() != complete.trim()) && !is_multiline { DisplaySuggestion::Underline } else { DisplaySuggestion::None }; if let DisplaySuggestion::Diff = show_code_change { row_num += 1; } let file_lines = sm .span_to_lines(parts[0].span) .expect("span_to_lines failed when emitting suggestion"); assert!(!file_lines.lines.is_empty() || parts[0].span.is_dummy()); let line_start = sm.lookup_char_pos(parts[0].span.lo()).line; draw_col_separator_no_space(&mut buffer, 1, max_line_num_len + 1); let mut lines = complete.lines(); if lines.clone().next().is_none() { // Account for a suggestion to completely remove a line(s) with whitespace (#94192). 
let line_end = sm.lookup_char_pos(parts[0].span.hi()).line; for line in line_start..=line_end { buffer.puts( row_num - 1 + line - line_start, 0, &self.maybe_anonymized(line), Style::LineNumber, ); buffer.puts( row_num - 1 + line - line_start, max_line_num_len + 1, "- ", Style::Removal, ); buffer.puts( row_num - 1 + line - line_start, max_line_num_len + 3, &normalize_whitespace(&*file_lines.file.get_line(line - 1).unwrap()), Style::Removal, ); } row_num += line_end - line_start; } for (line_pos, (line, highlight_parts)) in lines.by_ref().zip(highlights).take(MAX_SUGGESTION_HIGHLIGHT_LINES).enumerate() { // Print the span column to avoid confusion buffer.puts( row_num, 0, &self.maybe_anonymized(line_start + line_pos), Style::LineNumber, ); if let DisplaySuggestion::Diff = show_code_change { // Add the line number for both addition and removal to drive the point home. // // N - fn foo<A: T>(bar: A) { // N + fn foo(bar: impl T) { buffer.puts( row_num - 1, 0, &self.maybe_anonymized(line_start + line_pos), Style::LineNumber, ); buffer.puts(row_num - 1, max_line_num_len + 1, "- ", Style::Removal); buffer.puts( row_num - 1, max_line_num_len + 3, &normalize_whitespace( &*file_lines .file .get_line(file_lines.lines[line_pos].line_index) .unwrap(), ), Style::NoStyle, ); buffer.puts(row_num, max_line_num_len + 1, "+ ", Style::Addition); } else if is_multiline { match &highlight_parts[..] { [SubstitutionHighlight { start: 0, end }] if *end == line.len() => { buffer.puts(row_num, max_line_num_len + 1, "+ ", Style::Addition); } [] => { draw_col_separator(&mut buffer, row_num, max_line_num_len + 1); } _ => { buffer.puts(row_num, max_line_num_len + 1, "~ ", Style::Addition); } } } else { draw_col_separator(&mut buffer, row_num, max_line_num_len + 1); } // print the suggestion buffer.append(row_num, &normalize_whitespace(line), Style::NoStyle); // Colorize addition/replacements with green. for &SubstitutionHighlight { start, end } in highlight_parts { // Account for tabs when highlighting (#87972). let tabs: usize = line .chars() .take(start) .map(|ch| match ch { '\t' => 3, _ => 0, }) .sum(); buffer.set_style_range( row_num, max_line_num_len + 3 + start + tabs, max_line_num_len + 3 + end + tabs, Style::Addition, true, ); } row_num += 1; } // This offset and the ones below need to be signed to account for replacement code // that is shorter than the original code. let mut offsets: Vec<(usize, isize)> = Vec::new(); // Only show an underline in the suggestions if the suggestion is not the // entirety of the code being shown and the displayed code is not multiline. if let DisplaySuggestion::Diff | DisplaySuggestion::Underline = show_code_change { draw_col_separator(&mut buffer, row_num, max_line_num_len + 1); for part in parts { let span_start_pos = sm.lookup_char_pos(part.span.lo()).col_display; let span_end_pos = sm.lookup_char_pos(part.span.hi()).col_display; // Do not underline the leading... let start = part.snippet.len().saturating_sub(part.snippet.trim_start().len()); // ...or trailing spaces. Account for substitutions containing unicode // characters. 
let sub_len: usize = part .snippet .trim() .chars() .map(|ch| unicode_width::UnicodeWidthChar::width(ch).unwrap_or(1)) .sum(); let offset: isize = offsets .iter() .filter_map( |(start, v)| if span_start_pos <= *start { None } else { Some(v) }, ) .sum(); let underline_start = (span_start_pos + start) as isize + offset; let underline_end = (span_start_pos + start + sub_len) as isize + offset; assert!(underline_start >= 0 && underline_end >= 0); let padding: usize = max_line_num_len + 3; for p in underline_start..underline_end { if let DisplaySuggestion::Underline = show_code_change { // If this is a replacement, underline with `^`, if this is an addition // underline with `+`. buffer.putc( row_num, (padding as isize + p) as usize, if part.is_addition(&sm) { '+' } else { '~' }, Style::Addition, ); } } if let DisplaySuggestion::Diff = show_code_change { // Colorize removal with red in diff format. buffer.set_style_range( row_num - 2, (padding as isize + span_start_pos as isize) as usize, (padding as isize + span_end_pos as isize) as usize, Style::Removal, true, ); } // length of the code after substitution let full_sub_len = part .snippet .chars() .map(|ch| unicode_width::UnicodeWidthChar::width(ch).unwrap_or(1)) .sum::<usize>() as isize; // length of the code to be substituted let snippet_len = span_end_pos as isize - span_start_pos as isize; // For multiple substitutions, use the position *after* the previous // substitutions have happened, only when further substitutions are // located strictly after. offsets.push((span_end_pos, full_sub_len - snippet_len)); } row_num += 1; } // if we elided some lines, add an ellipsis if lines.next().is_some() { buffer.puts(row_num, max_line_num_len - 1, "...", Style::LineNumber); } else if let DisplaySuggestion::None = show_code_change { draw_col_separator_no_space(&mut buffer, row_num, max_line_num_len + 1); row_num += 1; } } if suggestions.len() > MAX_SUGGESTIONS { let others = suggestions.len() - MAX_SUGGESTIONS; let msg = format!("and {} other candidate{}", others, pluralize!(others)); buffer.puts(row_num, max_line_num_len + 3, &msg, Style::NoStyle); } else if notice_capitalization { let msg = "notice the capitalization difference"; buffer.puts(row_num, max_line_num_len + 3, &msg, Style::NoStyle); } emit_to_destination(&buffer.render(), level, &mut self.dst, self.short_message)?; Ok(()) } fn emit_messages_default( &mut self, level: &Level, message: &[(DiagnosticMessage, Style)], args: &FluentArgs<'_>, code: &Option<DiagnosticId>, span: &MultiSpan, children: &[SubDiagnostic], suggestions: &[CodeSuggestion], ) { let max_line_num_len = if self.ui_testing { ANONYMIZED_LINE_NUM.len() } else { let n = self.get_max_line_num(span, children); num_decimal_digits(n) }; match self.emit_message_default(span, message, args, code, level, max_line_num_len, false) { Ok(()) => { if !children.is_empty() || suggestions.iter().any(|s| s.style != SuggestionStyle::CompletelyHidden) { let mut buffer = StyledBuffer::new(); if !self.short_message { draw_col_separator_no_space(&mut buffer, 0, max_line_num_len + 1); } if let Err(e) = emit_to_destination( &buffer.render(), level, &mut self.dst, self.short_message, ) { panic!("failed to emit error: {}", e) } } if !self.short_message { for child in children { let span = child.render_span.as_ref().unwrap_or(&child.span); if let Err(err) = self.emit_message_default( &span, &child.message, args, &None, &child.level, max_line_num_len, true, ) { panic!("failed to emit error: {}", err); } } for sugg in suggestions { if sugg.style == 
SuggestionStyle::CompletelyHidden { // do not display this suggestion, it is meant only for tools } else if sugg.style == SuggestionStyle::HideCodeAlways { if let Err(e) = self.emit_message_default( &MultiSpan::new(), &[(sugg.msg.to_owned(), Style::HeaderMsg)], args, &None, &Level::Help, max_line_num_len, true, ) { panic!("failed to emit error: {}", e); } } else if let Err(e) = self.emit_suggestion_default(sugg, args, &Level::Help, max_line_num_len) { panic!("failed to emit error: {}", e); }; } } } Err(e) => panic!("failed to emit error: {}", e), } let mut dst = self.dst.writable(); match writeln!(dst) { Err(e) => panic!("failed to emit error: {}", e), _ => { if let Err(e) = dst.flush() { panic!("failed to emit error: {}", e) } } } } } impl FileWithAnnotatedLines { /// Preprocess all the annotations so that they are grouped by file and by line number /// This helps us quickly iterate over the whole message (including secondary file spans) pub fn collect_annotations( emitter: &dyn Emitter, args: &FluentArgs<'_>, msp: &MultiSpan, ) -> Vec<FileWithAnnotatedLines> { fn add_annotation_to_file( file_vec: &mut Vec<FileWithAnnotatedLines>, file: Lrc<SourceFile>, line_index: usize, ann: Annotation, ) { for slot in file_vec.iter_mut() { // Look through each of our files for the one we're adding to if slot.file.name == file.name { // See if we already have a line for it for line_slot in &mut slot.lines { if line_slot.line_index == line_index { line_slot.annotations.push(ann); return; } } // We don't have a line yet, create one slot.lines.push(Line { line_index, annotations: vec![ann] }); slot.lines.sort(); return; } } // This is the first time we're seeing the file file_vec.push(FileWithAnnotatedLines { file, lines: vec![Line { line_index, annotations: vec![ann] }], multiline_depth: 0, }); } let mut output = vec![]; let mut multiline_annotations = vec![]; if let Some(ref sm) = emitter.source_map() { for span_label in msp.span_labels() { if span_label.span.is_dummy() { continue; } let lo = sm.lookup_char_pos(span_label.span.lo()); let mut hi = sm.lookup_char_pos(span_label.span.hi()); // Watch out for "empty spans". If we get a span like 6..6, we // want to just display a `^` at 6, so convert that to // 6..7. This is degenerate input, but it's best to degrade // gracefully -- and the parser likes to supply a span like // that for EOF, in particular. if lo.col_display == hi.col_display && lo.line == hi.line { hi.col_display += 1; } if lo.line != hi.line { let ml = MultilineAnnotation { depth: 1, line_start: lo.line, line_end: hi.line, start_col: lo.col_display, end_col: hi.col_display, is_primary: span_label.is_primary, label: span_label .label .as_ref() .map(|m| emitter.translate_message(m, args).to_string()), overlaps_exactly: false, }; multiline_annotations.push((lo.file, ml)); } else { let ann = Annotation { start_col: lo.col_display, end_col: hi.col_display, is_primary: span_label.is_primary, label: span_label .label .as_ref() .map(|m| emitter.translate_message(m, args).to_string()), annotation_type: AnnotationType::Singleline, }; add_annotation_to_file(&mut output, lo.file, lo.line, ann); }; } } // Find overlapping multiline annotations, put them at different depths multiline_annotations.sort_by_key(|&(_, ref ml)| (ml.line_start, ml.line_end)); for (_, ann) in multiline_annotations.clone() { for (_, a) in multiline_annotations.iter_mut() { // Move all other multiline annotations overlapping with this one // one level to the right. 
if !(ann.same_span(a)) && num_overlap(ann.line_start, ann.line_end, a.line_start, a.line_end, true) { a.increase_depth(); } else if ann.same_span(a) && &ann != a { a.overlaps_exactly = true; } else { break; } } } let mut max_depth = 0; // max overlapping multiline spans for (file, ann) in multiline_annotations { max_depth = max(max_depth, ann.depth); let mut end_ann = ann.as_end(); if !ann.overlaps_exactly { // avoid output like // // | foo( // | _____^ // | |_____| // | || bar, // | || ); // | || ^ // | ||______| // | |______foo // | baz // // and instead get // // | foo( // | _____^ // | | bar, // | | ); // | | ^ // | | | // | |______foo // | baz add_annotation_to_file(&mut output, file.clone(), ann.line_start, ann.as_start()); // 4 is the minimum vertical length of a multiline span when presented: two lines // of code and two lines of underline. This is not true for the special case where // the beginning doesn't have an underline, but the current logic seems to be // working correctly. let middle = min(ann.line_start + 4, ann.line_end); for line in ann.line_start + 1..middle { // Every `|` that joins the beginning of the span (`___^`) to the end (`|__^`). add_annotation_to_file(&mut output, file.clone(), line, ann.as_line()); } let line_end = ann.line_end - 1; if middle < line_end { add_annotation_to_file(&mut output, file.clone(), line_end, ann.as_line()); } } else { end_ann.annotation_type = AnnotationType::Singleline; } add_annotation_to_file(&mut output, file, ann.line_end, end_ann); } for file_vec in output.iter_mut() { file_vec.multiline_depth = max_depth; } output } } // instead of taking the String length or dividing by 10 while > 0, we multiply a limit by 10 until // we're higher. If the loop isn't exited by the `return`, the last multiplication will wrap, which // is OK, because while we cannot fit a higher power of 10 in a usize, the loop will end anyway. // This is also why we need the max number of decimal digits within a `usize`. fn num_decimal_digits(num: usize) -> usize { #[cfg(target_pointer_width = "64")] const MAX_DIGITS: usize = 20; #[cfg(target_pointer_width = "32")] const MAX_DIGITS: usize = 10; #[cfg(target_pointer_width = "16")] const MAX_DIGITS: usize = 5; let mut lim = 10; for num_digits in 1..MAX_DIGITS { if num < lim { return num_digits; } lim = lim.wrapping_mul(10); } MAX_DIGITS } // We replace some characters so the CLI output is always consistent and underlines aligned. const OUTPUT_REPLACEMENTS: &[(char, &str)] = &[ ('\t', " "), // We do our own tab replacement ('\u{200D}', ""), // Replace ZWJ with nothing for consistent terminal output of grapheme clusters. ('\u{202A}', ""), // The following unicode text flow control characters are inconsistently ('\u{202B}', ""), // supported across CLIs and can cause confusion due to the bytes on disk ('\u{202D}', ""), // not corresponding to the visible source code, so we replace them always. 
('\u{202E}', ""), ('\u{2066}', ""), ('\u{2067}', ""), ('\u{2068}', ""), ('\u{202C}', ""), ('\u{2069}', ""), ]; fn normalize_whitespace(str: &str) -> String { let mut s = str.to_string(); for (c, replacement) in OUTPUT_REPLACEMENTS { s = s.replace(*c, replacement); } s } fn draw_col_separator(buffer: &mut StyledBuffer, line: usize, col: usize) { buffer.puts(line, col, "| ", Style::LineNumber); } fn draw_col_separator_no_space(buffer: &mut StyledBuffer, line: usize, col: usize) { draw_col_separator_no_space_with_style(buffer, line, col, Style::LineNumber); } fn draw_col_separator_no_space_with_style( buffer: &mut StyledBuffer, line: usize, col: usize, style: Style, ) { buffer.putc(line, col, '|', style); } fn draw_range( buffer: &mut StyledBuffer, symbol: char, line: usize, col_from: usize, col_to: usize, style: Style, ) { for col in col_from..col_to { buffer.putc(line, col, symbol, style); } } fn draw_note_separator(buffer: &mut StyledBuffer, line: usize, col: usize) { buffer.puts(line, col, "= ", Style::LineNumber); } fn draw_multiline_line( buffer: &mut StyledBuffer, line: usize, offset: usize, depth: usize, style: Style, ) { buffer.putc(line, offset + depth - 1, '|', style); } fn num_overlap( a_start: usize, a_end: usize, b_start: usize, b_end: usize, inclusive: bool, ) -> bool { let extra = if inclusive { 1 } else { 0 }; (b_start..b_end + extra).contains(&a_start) || (a_start..a_end + extra).contains(&b_start) } fn overlaps(a1: &Annotation, a2: &Annotation, padding: usize) -> bool { num_overlap(a1.start_col, a1.end_col + padding, a2.start_col, a2.end_col, false) } fn emit_to_destination( rendered_buffer: &[Vec<StyledString>], lvl: &Level, dst: &mut Destination, short_message: bool, ) -> io::Result<()> { use crate::lock; let mut dst = dst.writable(); // In order to prevent error message interleaving, where multiple error lines get intermixed // when multiple compiler processes error simultaneously, we emit errors with additional // steps. // // On Unix systems, we write into a buffered terminal rather than directly to a terminal. When // the .flush() is called we take the buffer created from the buffered writes and write it at // one shot. Because the Unix systems use ANSI for the colors, which is a text-based styling // scheme, this buffered approach works and maintains the styling. // // On Windows, styling happens through calls to a terminal API. This prevents us from using the // same buffering approach. Instead, we use a global Windows mutex, which we acquire long // enough to output the full error message, then we release. let _buffer_lock = lock::acquire_global_lock("rustc_errors"); for (pos, line) in rendered_buffer.iter().enumerate() { for part in line { dst.apply_style(*lvl, part.style)?; write!(dst, "{}", part.text)?; dst.reset()?; } if !short_message && (!lvl.is_failure_note() || pos != rendered_buffer.len() - 1) { writeln!(dst)?; } } dst.flush()?; Ok(()) } pub enum Destination { Terminal(StandardStream), Buffered(BufferWriter), // The bool denotes whether we should be emitting ansi color codes or not Raw(Box<(dyn Write + Send)>, bool), } pub enum WritableDst<'a> { Terminal(&'a mut StandardStream), Buffered(&'a mut BufferWriter, Buffer), Raw(&'a mut (dyn Write + Send)), ColoredRaw(Ansi<&'a mut (dyn Write + Send)>), } impl Destination { fn from_stderr(color: ColorConfig) -> Destination { let choice = color.to_color_choice(); // On Windows we'll be performing global synchronization on the entire // system for emitting rustc errors, so there's no need to buffer // anything. 
// // On non-Windows we rely on the atomicity of `write` to ensure errors // don't get all jumbled up. if cfg!(windows) { Terminal(StandardStream::stderr(choice)) } else { Buffered(BufferWriter::stderr(choice)) } } fn writable(&mut self) -> WritableDst<'_> { match *self { Destination::Terminal(ref mut t) => WritableDst::Terminal(t), Destination::Buffered(ref mut t) => { let buf = t.buffer(); WritableDst::Buffered(t, buf) } Destination::Raw(ref mut t, false) => WritableDst::Raw(t), Destination::Raw(ref mut t, true) => WritableDst::ColoredRaw(Ansi::new(t)), } } fn supports_color(&self) -> bool { match *self { Self::Terminal(ref stream) => stream.supports_color(), Self::Buffered(ref buffer) => buffer.buffer().supports_color(), Self::Raw(_, supports_color) => supports_color, } } } impl<'a> WritableDst<'a> { fn apply_style(&mut self, lvl: Level, style: Style) -> io::Result<()> { let mut spec = ColorSpec::new(); match style { Style::Addition => { spec.set_fg(Some(Color::Green)).set_intense(true); } Style::Removal => { spec.set_fg(Some(Color::Red)).set_intense(true); } Style::LineAndColumn => {} Style::LineNumber => { spec.set_bold(true); spec.set_intense(true); if cfg!(windows) { spec.set_fg(Some(Color::Cyan)); } else { spec.set_fg(Some(Color::Blue)); } } Style::Quotation => {} Style::MainHeaderMsg => { spec.set_bold(true); if cfg!(windows) { spec.set_intense(true).set_fg(Some(Color::White)); } } Style::UnderlinePrimary | Style::LabelPrimary => { spec = lvl.color(); spec.set_bold(true); } Style::UnderlineSecondary | Style::LabelSecondary => { spec.set_bold(true).set_intense(true); if cfg!(windows) { spec.set_fg(Some(Color::Cyan)); } else { spec.set_fg(Some(Color::Blue)); } } Style::HeaderMsg | Style::NoStyle => {} Style::Level(lvl) => { spec = lvl.color(); spec.set_bold(true); } Style::Highlight => { spec.set_bold(true); } } self.set_color(&spec) } fn set_color(&mut self, color: &ColorSpec) -> io::Result<()> { match *self { WritableDst::Terminal(ref mut t) => t.set_color(color), WritableDst::Buffered(_, ref mut t) => t.set_color(color), WritableDst::ColoredRaw(ref mut t) => t.set_color(color), WritableDst::Raw(_) => Ok(()), } } fn reset(&mut self) -> io::Result<()> { match *self { WritableDst::Terminal(ref mut t) => t.reset(), WritableDst::Buffered(_, ref mut t) => t.reset(), WritableDst::ColoredRaw(ref mut t) => t.reset(), WritableDst::Raw(_) => Ok(()), } } } impl<'a> Write for WritableDst<'a> { fn write(&mut self, bytes: &[u8]) -> io::Result<usize> { match *self { WritableDst::Terminal(ref mut t) => t.write(bytes), WritableDst::Buffered(_, ref mut buf) => buf.write(bytes), WritableDst::Raw(ref mut w) => w.write(bytes), WritableDst::ColoredRaw(ref mut t) => t.write(bytes), } } fn flush(&mut self) -> io::Result<()> { match *self { WritableDst::Terminal(ref mut t) => t.flush(), WritableDst::Buffered(_, ref mut buf) => buf.flush(), WritableDst::Raw(ref mut w) => w.flush(), WritableDst::ColoredRaw(ref mut w) => w.flush(), } } } impl<'a> Drop for WritableDst<'a> { fn drop(&mut self) { if let WritableDst::Buffered(ref mut dst, ref mut buf) = self { drop(dst.print(buf)); } } } /// Whether the original and suggested code are visually similar enough to warrant extra wording. pub fn is_case_difference(sm: &SourceMap, suggested: &str, sp: Span) -> bool { // FIXME: this should probably be extended to also account for `FO0` → `FOO` and unicode. 
let found = match sm.span_to_snippet(sp) { Ok(snippet) => snippet, Err(e) => { warn!(error = ?e, "Invalid span {:?}", sp); return false; } }; let ascii_confusables = &['c', 'f', 'i', 'k', 'o', 's', 'u', 'v', 'w', 'x', 'y', 'z']; // All the chars that differ in capitalization are confusable (above): let confusable = iter::zip(found.chars(), suggested.chars()) .filter(|(f, s)| f != s) .all(|(f, s)| (ascii_confusables.contains(&f) || ascii_confusables.contains(&s))); confusable && found.to_lowercase() == suggested.to_lowercase() // FIXME: We sometimes suggest the same thing we already have, which is a // bug, but be defensive against that here. && found != suggested }
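// Added sketch (not part of the original emitter): small checks illustrating two helpers
// above: `normalize_whitespace` applying OUTPUT_REPLACEMENTS, and the `inclusive` flag of
// `num_overlap`, which decides whether line ranges that merely touch count as overlapping
// (and are therefore drawn at different depths).
#[cfg(test)]
mod added_sketches {
    use super::{normalize_whitespace, num_overlap};

    #[test]
    fn tabs_are_rendered_as_four_spaces() {
        assert_eq!(normalize_whitespace("a\tb"), "a    b");
    }

    #[test]
    fn touching_ranges_overlap_only_inclusively() {
        assert!(num_overlap(1, 5, 5, 9, true)); // the ranges share line 5
        assert!(!num_overlap(1, 5, 5, 9, false));
    }
}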
39.4145
102
0.492503
4b1b2456283f5b4b3d59bc4863084e35103422a3
11,679
//! Computes folding ranges for a file: groups of comments, imports and `mod` declarations, multi-line blocks and argument lists, and `// region:` / `// endregion` markers. use rustc_hash::FxHashSet; use syntax::{ ast::{self, AstNode, AstToken, VisibilityOwner}, Direction, NodeOrToken, SourceFile, SyntaxKind::{self, *}, SyntaxNode, TextRange, TextSize, }; #[derive(Debug, PartialEq, Eq)] pub enum FoldKind { Comment, Imports, Mods, Block, ArgList, Region, } #[derive(Debug)] pub struct Fold { pub range: TextRange, pub kind: FoldKind, } pub(crate) fn folding_ranges(file: &SourceFile) -> Vec<Fold> { let mut res = vec![]; let mut visited_comments = FxHashSet::default(); let mut visited_imports = FxHashSet::default(); let mut visited_mods = FxHashSet::default(); // regions can be nested, here is a LIFO buffer let mut regions_starts: Vec<TextSize> = vec![]; for element in file.syntax().descendants_with_tokens() { // Fold items that span multiple lines if let Some(kind) = fold_kind(element.kind()) { let is_multiline = match &element { NodeOrToken::Node(node) => node.text().contains_char('\n'), NodeOrToken::Token(token) => token.text().contains('\n'), }; if is_multiline { res.push(Fold { range: element.text_range(), kind }); continue; } } match element { NodeOrToken::Token(token) => { // Fold groups of comments if let Some(comment) = ast::Comment::cast(token) { if !visited_comments.contains(&comment) { // regions are not real comments if comment.text().trim().starts_with("// region:") { regions_starts.push(comment.syntax().text_range().start()); } else if comment.text().trim().starts_with("// endregion") { if let Some(region) = regions_starts.pop() { res.push(Fold { range: TextRange::new( region, comment.syntax().text_range().end(), ), kind: FoldKind::Region, }) } } else { if let Some(range) = contiguous_range_for_comment(comment, &mut visited_comments) { res.push(Fold { range, kind: FoldKind::Comment }) } } } } } NodeOrToken::Node(node) => { // Fold groups of imports if node.kind() == USE && !visited_imports.contains(&node) { if let Some(range) = contiguous_range_for_group(&node, &mut visited_imports) { res.push(Fold { range, kind: FoldKind::Imports }) } } // Fold groups of mods if node.kind() == MODULE && !has_visibility(&node) && !visited_mods.contains(&node) { if let Some(range) = contiguous_range_for_group_unless(&node, has_visibility, &mut visited_mods) { res.push(Fold { range, kind: FoldKind::Mods }) } } } } } res } fn fold_kind(kind: SyntaxKind) -> Option<FoldKind> { match kind { COMMENT => Some(FoldKind::Comment), ARG_LIST | PARAM_LIST => Some(FoldKind::ArgList), ASSOC_ITEM_LIST | RECORD_FIELD_LIST | RECORD_PAT_FIELD_LIST | RECORD_EXPR_FIELD_LIST | ITEM_LIST | EXTERN_ITEM_LIST | USE_TREE_LIST | BLOCK_EXPR | MATCH_ARM_LIST | VARIANT_LIST | TOKEN_TREE => Some(FoldKind::Block), _ => None, } } fn has_visibility(node: &SyntaxNode) -> bool { ast::Module::cast(node.clone()).and_then(|m| m.visibility()).is_some() } fn contiguous_range_for_group( first: &SyntaxNode, visited: &mut FxHashSet<SyntaxNode>, ) -> Option<TextRange> { contiguous_range_for_group_unless(first, |_| false, visited) } fn contiguous_range_for_group_unless( first: &SyntaxNode, unless: impl Fn(&SyntaxNode) -> bool, visited: &mut FxHashSet<SyntaxNode>, ) -> Option<TextRange> { visited.insert(first.clone()); let mut last = first.clone(); for element in first.siblings_with_tokens(Direction::Next) { let node = match element { NodeOrToken::Token(token) => { if let Some(ws) = ast::Whitespace::cast(token) { if !ws.spans_multiple_lines() { // Ignore whitespace without blank lines continue; } } // There is a blank line or another token, which means that the // group ends here
break; } NodeOrToken::Node(node) => node, }; // Stop if we find a node that doesn't belong to the group if node.kind() != first.kind() || unless(&node) { break; } visited.insert(node.clone()); last = node; } if first != &last { Some(TextRange::new(first.text_range().start(), last.text_range().end())) } else { // The group consists of only one element, therefore it cannot be folded None } } fn contiguous_range_for_comment( first: ast::Comment, visited: &mut FxHashSet<ast::Comment>, ) -> Option<TextRange> { visited.insert(first.clone()); // Only fold comments of the same flavor let group_kind = first.kind(); if !group_kind.shape.is_line() { return None; } let mut last = first.clone(); for element in first.syntax().siblings_with_tokens(Direction::Next) { match element { NodeOrToken::Token(token) => { if let Some(ws) = ast::Whitespace::cast(token.clone()) { if !ws.spans_multiple_lines() { // Ignore whitespace without blank lines continue; } } if let Some(c) = ast::Comment::cast(token) { if c.kind() == group_kind { // regions are not real comments if c.text().trim().starts_with("// region:") || c.text().trim().starts_with("// endregion") { break; } else { visited.insert(c.clone()); last = c; continue; } } } // The comment group ends because either: // * An element of a different kind was reached // * A comment of a different flavor was reached break; } NodeOrToken::Node(_) => break, }; } if first != last { Some(TextRange::new(first.syntax().text_range().start(), last.syntax().text_range().end())) } else { // The group consists of only one element, therefore it cannot be folded None } } #[cfg(test)] mod tests { use test_utils::extract_tags; use super::*; fn check(ra_fixture: &str) { let (ranges, text) = extract_tags(ra_fixture, "fold"); let parse = SourceFile::parse(&text); let folds = folding_ranges(&parse.tree()); assert_eq!( folds.len(), ranges.len(), "The amount of folds is different than the expected amount" ); for (fold, (range, attr)) in folds.iter().zip(ranges.into_iter()) { assert_eq!(fold.range.start(), range.start()); assert_eq!(fold.range.end(), range.end()); let kind = match fold.kind { FoldKind::Comment => "comment", FoldKind::Imports => "imports", FoldKind::Mods => "mods", FoldKind::Block => "block", FoldKind::ArgList => "arglist", FoldKind::Region => "region", }; assert_eq!(kind, &attr.unwrap()); } } #[test] fn test_fold_comments() { check( r#" <fold comment>// Hello // this is a multiline // comment //</fold> // But this is not fn main() <fold block>{ <fold comment>// We should // also // fold // this one.</fold> <fold comment>//! But this one is different //! 
because it has another flavor</fold> <fold comment>/* As does this multiline comment */</fold> }</fold>"#, ); } #[test] fn test_fold_imports() { check( r#" use std::<fold block>{ str, vec, io as iop }</fold>; fn main() <fold block>{ }</fold>"#, ); } #[test] fn test_fold_mods() { check( r#" pub mod foo; <fold mods>mod after_pub; mod after_pub_next;</fold> <fold mods>mod before_pub; mod before_pub_next;</fold> pub mod bar; mod not_folding_single; pub mod foobar; pub not_folding_single_next; <fold mods>#[cfg(test)] mod with_attribute; mod with_attribute_next;</fold> fn main() <fold block>{ }</fold>"#, ); } #[test] fn test_fold_import_groups() { check( r#" <fold imports>use std::str; use std::vec; use std::io as iop;</fold> <fold imports>use std::mem; use std::f64;</fold> <fold imports>use std::collections::HashMap; // Some random comment use std::collections::VecDeque;</fold> fn main() <fold block>{ }</fold>"#, ); } #[test] fn test_fold_import_and_groups() { check( r#" <fold imports>use std::str; use std::vec; use std::io as iop;</fold> <fold imports>use std::mem; use std::f64;</fold> use std::collections::<fold block>{ HashMap, VecDeque, }</fold>; // Some random comment fn main() <fold block>{ }</fold>"#, ); } #[test] fn test_folds_structs() { check( r#" struct Foo <fold block>{ }</fold> "#, ); } #[test] fn test_folds_traits() { check( r#" trait Foo <fold block>{ }</fold> "#, ); } #[test] fn test_folds_macros() { check( r#" macro_rules! foo <fold block>{ ($($tt:tt)*) => { $($tt)* } }</fold> "#, ); } #[test] fn test_fold_match_arms() { check( r#" fn main() <fold block>{ match 0 <fold block>{ 0 => 0, _ => 1, }</fold> }</fold> "#, ); } #[test] fn fold_big_calls() { check( r#" fn main() <fold block>{ frobnicate<fold arglist>( 1, 2, 3, )</fold> }</fold> "#, ) } #[test] fn fold_record_literals() { check( r#" const _: S = S <fold block>{ }</fold>; "#, ) } #[test] fn fold_multiline_params() { check( r#" fn foo<fold arglist>( x: i32, y: String, )</fold> {} "#, ) } #[test] fn fold_region() { check( r#" // 1. some normal comment <fold region>// region: test // 2. some normal comment calling_function(x,y); // endregion: test</fold> "#, ) } }
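// Added usage sketch (not part of the original file): running `folding_ranges` over a
// freshly parsed file; a multi-line block expression yields a single `FoldKind::Block` fold.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[test]
    fn single_block_fold() {
        let file = SourceFile::parse("fn main() {\n    let x = 1;\n}\n").tree();
        let folds = folding_ranges(&file);
        assert_eq!(folds.len(), 1); // only the multi-line block expression folds
        assert_eq!(folds[0].kind, FoldKind::Block);
    }
}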
25.334056
99
0.491309
d6833a403ec10487c0f8716517f33d84ffdc0e41
16,696
#![doc = "generated by AutoRust 0.1.0"] #![allow(non_camel_case_types)] #![allow(unused_imports)] use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationListResult { #[serde(skip_serializing)] pub value: Vec<Operation>, #[serde(rename = "nextLink", skip_serializing)] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Operation { #[serde(skip_serializing)] pub name: Option<String>, #[serde(skip_serializing)] pub display: Option<operation::Display>, } pub mod operation { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Display { #[serde(skip_serializing)] pub provider: Option<String>, #[serde(skip_serializing)] pub resource: Option<String>, #[serde(skip_serializing)] pub operation: Option<String>, #[serde(skip_serializing)] pub description: Option<String>, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Resource { #[serde(skip_serializing)] pub id: Option<String>, #[serde(skip_serializing)] pub name: Option<String>, #[serde(rename = "type", skip_serializing)] pub type_: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TrackedResource { #[serde(flatten)] pub resource: Resource, pub location: String, #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ResourceProperties { #[serde(rename = "provisioningState", skip_serializing)] pub provisioning_state: Option<resource_properties::ProvisioningState>, #[serde(rename = "creationTime", skip_serializing)] pub creation_time: Option<String>, } pub mod resource_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ProvisioningState { Accepted, Creating, Updating, Succeeded, Failed, Deleting, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Sku { pub name: sku::Name, pub capacity: i32, } pub mod sku { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Name { S1, S2, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CreateOrUpdateTrackedResourceProperties { pub location: String, #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EnvironmentCreateOrUpdateParameters { #[serde(flatten)] pub create_or_update_tracked_resource_properties: CreateOrUpdateTrackedResourceProperties, pub sku: Sku, pub properties: EnvironmentCreationProperties, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EnvironmentUpdateParameters { #[serde(skip_serializing_if = "Option::is_none")] pub sku: Option<Sku>, #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, #[serde(skip_serializing_if = "Option::is_none")] pub properties: Option<EnvironmentMutableProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EnvironmentListResponse { #[serde(skip_serializing_if = "Vec::is_empty")] pub value: Vec<EnvironmentResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EnvironmentResource { #[serde(flatten)] pub tracked_resource: TrackedResource, #[serde(skip_serializing_if = "Option::is_none")] pub sku: Option<Sku>, #[serde(skip_serializing_if = "Option::is_none")] pub properties: Option<EnvironmentResourceProperties>, } #[derive(Clone, Debug, 
PartialEq, Serialize, Deserialize)] pub struct EnvironmentCreationProperties { #[serde(rename = "dataRetentionTime")] pub data_retention_time: String, #[serde(rename = "storageLimitExceededBehavior", skip_serializing_if = "Option::is_none")] pub storage_limit_exceeded_behavior: Option<environment_creation_properties::StorageLimitExceededBehavior>, } pub mod environment_creation_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum StorageLimitExceededBehavior { PurgeOldData, PauseIngress, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EnvironmentResourceProperties { #[serde(flatten)] pub environment_creation_properties: EnvironmentCreationProperties, #[serde(flatten)] pub resource_properties: ResourceProperties, #[serde(rename = "dataAccessId", skip_serializing)] pub data_access_id: Option<String>, #[serde(rename = "dataAccessFqdn", skip_serializing)] pub data_access_fqdn: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EnvironmentMutableProperties { #[serde(rename = "dataRetentionTime", skip_serializing_if = "Option::is_none")] pub data_retention_time: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventSourceCreateOrUpdateParameters { #[serde(flatten)] pub create_or_update_tracked_resource_properties: CreateOrUpdateTrackedResourceProperties, pub kind: event_source_create_or_update_parameters::Kind, } pub mod event_source_create_or_update_parameters { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Kind { #[serde(rename = "Microsoft.EventHub")] MicrosoftEventHub, #[serde(rename = "Microsoft.IoTHub")] MicrosoftIoTHub, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventHubEventSourceCreateOrUpdateParameters { #[serde(flatten)] pub event_source_create_or_update_parameters: EventSourceCreateOrUpdateParameters, pub properties: EventHubEventSourceCreationProperties, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IoTHubEventSourceCreateOrUpdateParameters { #[serde(flatten)] pub event_source_create_or_update_parameters: EventSourceCreateOrUpdateParameters, pub properties: IoTHubEventSourceCreationProperties, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventSourceUpdateParameters { #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventHubEventSourceUpdateParameters { #[serde(flatten)] pub event_source_update_parameters: EventSourceUpdateParameters, #[serde(skip_serializing_if = "Option::is_none")] pub properties: Option<EventHubEventSourceMutableProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IoTHubEventSourceUpdateParameters { #[serde(flatten)] pub event_source_update_parameters: EventSourceUpdateParameters, #[serde(skip_serializing_if = "Option::is_none")] pub properties: Option<IoTHubEventSourceMutableProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventSourceListResponse { #[serde(skip_serializing_if = "Vec::is_empty")] pub value: Vec<EventSourceResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventSourceResource { #[serde(flatten)] pub tracked_resource: TrackedResource, pub kind: event_source_resource::Kind, } pub mod event_source_resource { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub 
enum Kind { #[serde(rename = "Microsoft.EventHub")] MicrosoftEventHub, #[serde(rename = "Microsoft.IoTHub")] MicrosoftIoTHub, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventHubEventSourceResource { #[serde(flatten)] pub event_source_resource: EventSourceResource, pub properties: EventHubEventSourceResourceProperties, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IoTHubEventSourceResource { #[serde(flatten)] pub event_source_resource: EventSourceResource, pub properties: IoTHubEventSourceResourceProperties, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventSourceCommonProperties { #[serde(flatten)] pub resource_properties: ResourceProperties, #[serde(rename = "timestampPropertyName", skip_serializing_if = "Option::is_none")] pub timestamp_property_name: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureEventSourceProperties { #[serde(flatten)] pub event_source_common_properties: EventSourceCommonProperties, #[serde(rename = "eventSourceResourceId")] pub event_source_resource_id: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventHubEventSourceCommonProperties { #[serde(flatten)] pub azure_event_source_properties: AzureEventSourceProperties, #[serde(rename = "serviceBusNamespace")] pub service_bus_namespace: String, #[serde(rename = "eventHubName")] pub event_hub_name: String, #[serde(rename = "consumerGroupName")] pub consumer_group_name: String, #[serde(rename = "keyName")] pub key_name: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventHubEventSourceCreationProperties { #[serde(flatten)] pub event_hub_event_source_common_properties: EventHubEventSourceCommonProperties, #[serde(rename = "sharedAccessKey")] pub shared_access_key: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventHubEventSourceResourceProperties { #[serde(flatten)] pub event_hub_event_source_common_properties: EventHubEventSourceCommonProperties, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IoTHubEventSourceCommonProperties { #[serde(flatten)] pub azure_event_source_properties: AzureEventSourceProperties, #[serde(rename = "iotHubName")] pub iot_hub_name: String, #[serde(rename = "consumerGroupName")] pub consumer_group_name: String, #[serde(rename = "keyName")] pub key_name: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IoTHubEventSourceCreationProperties { #[serde(flatten)] pub io_t_hub_event_source_common_properties: IoTHubEventSourceCommonProperties, #[serde(rename = "sharedAccessKey")] pub shared_access_key: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IoTHubEventSourceResourceProperties { #[serde(flatten)] pub io_t_hub_event_source_common_properties: IoTHubEventSourceCommonProperties, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LocalTimestamp { #[serde(skip_serializing_if = "Option::is_none")] pub format: Option<local_timestamp::Format>, #[serde(rename = "timeZoneOffset", skip_serializing_if = "Option::is_none")] pub time_zone_offset: Option<local_timestamp::TimeZoneOffset>, } pub mod local_timestamp { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Format { Embedded, Iana, TimeSpan, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TimeZoneOffset { #[serde(rename = "propertyName", skip_serializing_if = 
"Option::is_none")] pub property_name: Option<String>, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventSourceMutableProperties { #[serde(rename = "timestampPropertyName", skip_serializing_if = "Option::is_none")] pub timestamp_property_name: Option<String>, #[serde(rename = "localTimestamp", skip_serializing_if = "Option::is_none")] pub local_timestamp: Option<LocalTimestamp>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventHubEventSourceMutableProperties { #[serde(flatten)] pub event_source_mutable_properties: EventSourceMutableProperties, #[serde(rename = "sharedAccessKey", skip_serializing_if = "Option::is_none")] pub shared_access_key: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IoTHubEventSourceMutableProperties { #[serde(flatten)] pub event_source_mutable_properties: EventSourceMutableProperties, #[serde(rename = "sharedAccessKey", skip_serializing_if = "Option::is_none")] pub shared_access_key: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ReferenceDataSetCreateOrUpdateParameters { #[serde(flatten)] pub create_or_update_tracked_resource_properties: CreateOrUpdateTrackedResourceProperties, pub properties: ReferenceDataSetCreationProperties, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ReferenceDataSetUpdateParameters { #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ReferenceDataSetListResponse { #[serde(skip_serializing_if = "Vec::is_empty")] pub value: Vec<ReferenceDataSetResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ReferenceDataSetResource { #[serde(flatten)] pub tracked_resource: TrackedResource, #[serde(skip_serializing_if = "Option::is_none")] pub properties: Option<ReferenceDataSetResourceProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ReferenceDataSetCreationProperties { #[serde(rename = "keyProperties")] pub key_properties: Vec<ReferenceDataSetKeyProperty>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ReferenceDataSetResourceProperties { #[serde(flatten)] pub reference_data_set_creation_properties: ReferenceDataSetCreationProperties, #[serde(flatten)] pub resource_properties: ResourceProperties, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ReferenceDataSetKeyProperty { #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "type", skip_serializing_if = "Option::is_none")] pub type_: Option<reference_data_set_key_property::Type>, } pub mod reference_data_set_key_property { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Type { String, Double, Bool, DateTime, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AccessPolicyCreateOrUpdateParameters { pub properties: AccessPolicyResourceProperties, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AccessPolicyUpdateParameters { pub properties: AccessPolicyMutableProperties, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AccessPolicyListResponse { #[serde(skip_serializing_if = "Vec::is_empty")] pub value: Vec<AccessPolicyResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AccessPolicyResource { #[serde(flatten)] pub resource: Resource, 
#[serde(skip_serializing_if = "Option::is_none")] pub properties: Option<AccessPolicyResourceProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AccessPolicyResourceProperties { #[serde(rename = "principalObjectId", skip_serializing_if = "Option::is_none")] pub principal_object_id: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(skip_serializing_if = "Vec::is_empty")] pub roles: Vec<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AccessPolicyMutableProperties { #[serde(skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(skip_serializing_if = "Vec::is_empty")] pub roles: Vec<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CloudError { #[serde(skip_serializing_if = "Option::is_none")] pub error: Option<CloudErrorBody>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CloudErrorBody { #[serde(skip_serializing_if = "Option::is_none")] pub code: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub message: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub target: Option<String>, #[serde(skip_serializing_if = "Vec::is_empty")] pub details: Vec<CloudErrorBody>, }
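// Added round-trip sketch (not part of the generated file): the models above are plain
// serde types, so they serialize exactly as their field order and rename attributes describe.
#[cfg(test)]
mod serde_sketch {
    use super::*;

    #[test]
    fn sku_round_trips_through_json() {
        let sku = Sku { name: sku::Name::S1, capacity: 2 };
        let json = serde_json::to_string(&sku).unwrap();
        assert_eq!(json, r#"{"name":"S1","capacity":2}"#);
        let back: Sku = serde_json::from_str(&json).unwrap();
        assert_eq!(back, sku);
    }
}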
38.20595
111
0.737482
b9897f9c218eb91b8685c16033fb85fbca5de9d5
5,777
use crate::error::{add_path_index, add_path_name, optional, SchemaError, SchemaErrorKind}; use crate::utils::YamlUtils; use crate::{Context, PropertyType, Validate}; use std::convert::TryFrom; use yaml_rust::Yaml; #[derive(Debug, Default)] pub(crate) struct SchemaHash<'schema> { items: Option<Box<PropertyType<'schema>>>, } impl<'schema> TryFrom<&'schema Yaml> for SchemaHash<'schema> { type Error = SchemaError<'schema>; fn try_from(yaml: &'schema Yaml) -> Result<Self, Self::Error> { yaml.strict_contents(&[], &["items", "type"])?; // I'm using Option::from here because I don't actually want to transform // the resulting yaml object into a specific type, but need the yaml itself // to be passed into PropertyType::try_from yaml.lookup("items", "yaml", Option::from) .map(|inner| { yaml.lookup("items", "hash", Yaml::as_hash) .map_err(add_path_name("items"))?; Ok(SchemaHash { items: Some(Box::new( PropertyType::try_from(inner).map_err(add_path_name("items"))?, )), }) }) .or_else(optional(Ok(SchemaHash { items: None })))? } } impl<'yaml, 'schema: 'yaml> Validate<'yaml, 'schema> for SchemaHash<'schema> { fn validate( &self, ctx: &'schema Context<'schema>, yaml: &'yaml Yaml, ) -> Result<(), SchemaError<'yaml>> { let items = yaml.as_type("hash", Yaml::as_hash)?; if let Some(schema) = &self.items { let mut errors: Vec<SchemaError<'yaml>> = items .values() .enumerate() .map(|(i, item)| schema.validate(ctx, item).map_err(add_path_index(i))) .filter(Result::is_err) .map(Result::unwrap_err) .collect(); return if errors.is_empty() { Ok(()) } else if errors.len() == 1 { Err(errors.pop().unwrap()) } else { Err(SchemaErrorKind::Multiple { errors }.into()) }; } Ok(()) } } #[cfg(test)] mod tests { use super::*; use crate::utils::load_simple; use crate::SchemaHash; #[cfg(feature = "smallvec")] use smallvec::smallvec; #[test] fn from_yaml() { SchemaHash::try_from(&load_simple( r#" items: type: string "#, )) .unwrap(); } #[test] fn malformed_items() { assert_eq!( SchemaHash::try_from(&load_simple( r#" items: - type: string "#, )) .unwrap_err(), SchemaErrorKind::WrongType { expected: "hash", actual: "array" } .with_path(path!["items"]) .into(), ); } #[test] fn from_string() { assert_eq!( SchemaHash::try_from(&load_simple("world")).unwrap_err(), SchemaErrorKind::WrongType { expected: "hash", actual: "string" } .into() ); } #[test] fn from_integer() { assert_eq!( SchemaHash::try_from(&load_simple("10")).unwrap_err(), SchemaErrorKind::WrongType { expected: "hash", actual: "integer" } .into() ); } #[test] fn from_array() { assert_eq!( SchemaHash::try_from(&load_simple( r#" - hello - world "# )) .unwrap_err(), SchemaErrorKind::WrongType { expected: "hash", actual: "array" } .into() ); } #[test] fn validate_string() { let schema = SchemaHash::default(); assert_eq!( schema .validate(&Context::default(), &load_simple("hello world")) .unwrap_err(), SchemaErrorKind::WrongType { expected: "hash", actual: "string" } .into() ); } #[test] fn validate_integer() { let schema = SchemaHash::default(); assert_eq!( schema .validate(&Context::default(), &load_simple("10")) .unwrap_err(), SchemaErrorKind::WrongType { expected: "hash", actual: "integer" } .into() ); } #[test] fn validate_untyped_hash() { let schema = SchemaHash::default(); schema .validate(&Context::default(), &load_simple("hello: world")) .unwrap(); } #[test] fn validate_typed_hash() { let yaml = load_simple("type: hash\nitems:\n type: integer"); let schema = SchemaHash::try_from(&yaml).unwrap(); schema .validate(&Context::default(), &load_simple("hello: 20")) 
.unwrap(); } #[test] fn validate_invalid_typed_hash() { let yaml = load_simple("type: hash\nitems:\n type: integer"); let schema = SchemaHash::try_from(&yaml).unwrap(); assert_eq!( schema .validate( &Context::default(), &load_simple("hello: 20\nworld: clearly a string") ) .unwrap_err(), SchemaErrorKind::WrongType { expected: "integer", actual: "string" } .with_path(path![1]) ); } }
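// Added sketch (not in the original test suite): the `items` schema is applied to every
// value of the hash, so a string-typed item schema accepts a hash of string values.
#[cfg(test)]
mod typed_hash_sketch {
    use super::*;
    use crate::utils::load_simple;

    #[test]
    fn validate_typed_hash_of_strings() {
        let yaml = load_simple("type: hash\nitems:\n type: string");
        let schema = SchemaHash::try_from(&yaml).unwrap();
        schema
            .validate(&Context::default(), &load_simple("hello: world"))
            .unwrap();
    }
}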
26.62212
90
0.470833
f8b7781cd927139aea7951a3f73aa5204cf70b2b
22,528
/* * hurl (https://hurl.dev) * Copyright (C) 2020 Orange * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ use std::io::Read; use std::str; use curl::easy; use encoding::all::ISO_8859_1; use encoding::{DecoderTrap, Encoding}; use std::time::Instant; use super::core::*; use super::options::ClientOptions; use super::request::*; use super::request_spec::*; use super::response::*; use std::str::FromStr; use url::Url; #[derive(Clone, Debug, PartialEq, Eq)] pub enum HttpError { CouldNotResolveProxyName, CouldNotResolveHost(String), FailToConnect, TooManyRedirect, CouldNotParseResponse, SslCertificate(Option<String>), InvalidUrl, Timeout, StatuslineIsMissing, Other { description: String, code: i32 }, } #[derive(Debug)] pub struct Client { pub options: ClientOptions, pub handle: Box<easy::Easy>, pub redirect_count: usize, // unfortunately, the follow-location feature from libcurl cannot be used: // libcurl returns a single list of headers for the 2 responses, // while hurl needs to keep each exchange separate } impl Client { /// /// Initialize the hurl HTTP client /// pub fn init(options: ClientOptions) -> Client { let mut h = easy::Easy::new(); // Set handle attributes // that are not affected by reset // Activate cookie storage // with or without persistence (empty string) h.cookie_file( options .cookie_input_file .clone() .unwrap_or_else(|| "".to_string()) .as_str(), ) .unwrap(); Client { options, handle: Box::new(h), redirect_count: 0, } } /// /// Execute an http request, following redirects when enabled /// pub fn execute_with_redirect( &mut self, request: &RequestSpec, ) -> Result<Vec<(Request, Response)>, HttpError> { let mut calls = vec![]; let mut request_spec = request.clone(); self.redirect_count = 0; loop { let (request, response) = self.execute(&request_spec)?; calls.push((request, response.clone())); if let Some(url) = self.get_follow_location(response.clone()) { request_spec = RequestSpec { method: Method::Get, url, headers: vec![], querystring: vec![], form: vec![], multipart: vec![], cookies: vec![], body: Body::Binary(vec![]), content_type: None, }; self.redirect_count += 1; if let Some(max_redirect) = self.options.max_redirect { if self.redirect_count > max_redirect { return Err(HttpError::TooManyRedirect); } } } else { break; } } Ok(calls) } /// /// Execute an http request /// pub fn execute(&mut self, request: &RequestSpec) -> Result<(Request, Response), HttpError> { // set handle attributes // that have not been set or reset self.handle.verbose(true).unwrap(); self.handle.ssl_verify_host(!self.options.insecure).unwrap(); self.handle.ssl_verify_peer(!self.options.insecure).unwrap(); if let Some(proxy) = self.options.proxy.clone() { self.handle.proxy(proxy.as_str()).unwrap(); } if let Some(s) = self.options.no_proxy.clone() { self.handle.noproxy(s.as_str()).unwrap(); } self.handle.timeout(self.options.timeout).unwrap(); self.handle .connect_timeout(self.options.connect_timeout) .unwrap(); let url = self.generate_url(&request.url, &request.querystring); self.handle.url(url.as_str()).unwrap(); self.set_method(&request.method);
self.set_cookies(&request.cookies); self.set_form(&request.form); self.set_multipart(&request.multipart); let bytes = request.body.bytes(); let mut data: &[u8] = bytes.as_ref(); self.set_body(data); self.set_headers(request); let verbose = self.options.verbose; let mut request_headers: Vec<Header> = vec![]; let start = Instant::now(); let mut status_lines = vec![]; let mut headers = vec![]; let mut body = Vec::<u8>::new(); { let mut transfer = self.handle.transfer(); if !data.is_empty() { transfer .read_function(|buf| Ok(data.read(buf).unwrap_or(0))) .unwrap(); } transfer .debug_function(|info_type, data| match info_type { // return all request headers (not one by one) easy::InfoType::HeaderOut => { let mut lines = split_lines(data); if verbose { for line in lines.clone() { eprintln!("> {}", line); } } lines.pop().unwrap(); lines.remove(0); // method/url for line in lines { if let Some(header) = Header::parse(line) { request_headers.push(header); } } } easy::InfoType::HeaderIn => { if let Some(s) = decode_header(data) { if verbose { eprint!("< {}", s); } } } _ => {} }) .unwrap(); transfer .header_function(|h| { if let Some(s) = decode_header(h) { if s.starts_with("HTTP/") { status_lines.push(s); } else { headers.push(s) } } true }) .unwrap(); transfer .write_function(|data| { body.extend(data); Ok(data.len()) }) .unwrap(); if let Err(e) = transfer.perform() { return match e.code() { 3 => Err(HttpError::InvalidUrl), 5 => Err(HttpError::CouldNotResolveProxyName), 6 => Err(HttpError::CouldNotResolveHost(extract_host( request.url.clone(), ))), 7 => Err(HttpError::FailToConnect), 28 => Err(HttpError::Timeout), 60 => Err(HttpError::SslCertificate( e.extra_description().map(String::from), )), _ => Err(HttpError::Other { code: e.code() as i32, // due to windows build description: e.description().to_string(), }), }; } } let status = self.handle.response_code().unwrap(); let version = match status_lines.last() { None => return Err(HttpError::StatuslineIsMissing {}), Some(status_line) => self.parse_response_version(status_line.clone())?, }; let headers = self.parse_response_headers(&headers); let duration = start.elapsed(); self.handle.reset(); let request = Request { url, method: (&request.method).to_string(), headers: request_headers, }; let response = Response { version, status, headers, body, duration, }; Ok((request, response)) } /// /// generate url /// fn generate_url(&mut self, url: &str, params: &[Param]) -> String { let url = if params.is_empty() { url.to_string() } else { let url = if url.ends_with('?') { url.to_string() } else if url.contains('?') { format!("{}&", url) } else { format!("{}?", url) }; let s = self.encode_params(params); format!("{}{}", url, s) }; url } /// /// set method /// fn set_method(&mut self, method: &Method) { match method { Method::Get => self.handle.custom_request("GET").unwrap(), Method::Post => self.handle.custom_request("POST").unwrap(), Method::Put => self.handle.custom_request("PUT").unwrap(), Method::Head => self.handle.custom_request("HEAD").unwrap(), Method::Delete => self.handle.custom_request("DELETE").unwrap(), Method::Connect => self.handle.custom_request("CONNECT").unwrap(), Method::Options => self.handle.custom_request("OPTIONS").unwrap(), Method::Trace => self.handle.custom_request("TRACE").unwrap(), Method::Patch => self.handle.custom_request("PATCH").unwrap(), } } /// /// set request headers /// fn set_headers(&mut self, request: &RequestSpec) { let mut list = easy::List::new(); for header in request.headers.clone() { list.append(format!("{}: 
{}", header.name, header.value).as_str()) .unwrap(); } if get_header_values(request.headers.clone(), "Content-Type".to_string()).is_empty() { if let Some(s) = request.content_type.clone() { list.append(format!("Content-Type: {}", s).as_str()) .unwrap(); } else { list.append("Content-Type:").unwrap(); // remove header Content-Type } } if get_header_values(request.headers.clone(), "Expect".to_string()).is_empty() { list.append("Expect:").unwrap(); // remove header Expect } if get_header_values(request.headers.clone(), "User-Agent".to_string()).is_empty() { list.append(format!("User-Agent: hurl/{}", clap::crate_version!()).as_str()) .unwrap(); } if let Some(user) = self.options.user.clone() { let authorization = base64::encode(user.as_bytes()); if get_header_values(request.headers.clone(), "Authorization".to_string()).is_empty() { list.append(format!("Authorization: Basic {}", authorization).as_str()) .unwrap(); } } if self.options.compressed && get_header_values(request.headers.clone(), "Accept-Encoding".to_string()).is_empty() { list.append("Accept-Encoding: gzip, deflate, br").unwrap(); } self.handle.http_headers(list).unwrap(); } /// /// set request cookies /// fn set_cookies(&mut self, cookies: &[RequestCookie]) { let s = cookies .iter() .map(|c| c.to_string()) .collect::<Vec<String>>() .join("; "); if !s.is_empty() { self.handle.cookie(s.as_str()).unwrap(); } } /// /// set form /// fn set_form(&mut self, params: &[Param]) { if !params.is_empty() { let s = self.encode_params(params); self.handle.post_fields_copy(s.as_str().as_bytes()).unwrap(); //self.handle.write_function(sink); } } /// /// set form /// fn set_multipart(&mut self, params: &[MultipartParam]) { if !params.is_empty() { let mut form = easy::Form::new(); for param in params { match param { MultipartParam::Param(Param { name, value }) => { form.part(name).contents(value.as_bytes()).add().unwrap() } MultipartParam::FileParam(FileParam { name, filename, data, content_type, }) => form .part(name) .buffer(filename, data.clone()) .content_type(content_type) .add() .unwrap(), } } self.handle.httppost(form).unwrap(); } } /// /// set body /// fn set_body(&mut self, data: &[u8]) { if !data.is_empty() { self.handle.post(true).unwrap(); self.handle.post_field_size(data.len() as u64).unwrap(); } } /// /// encode parameters /// fn encode_params(&mut self, params: &[Param]) -> String { params .iter() .map(|p| { let value = self.handle.url_encode(p.value.as_bytes()); format!("{}={}", p.name, value) }) .collect::<Vec<String>>() .join("&") } /// /// parse response version /// fn parse_response_version(&mut self, line: String) -> Result<Version, HttpError> { if line.starts_with("HTTP/1.0") { Ok(Version::Http10) } else if line.starts_with("HTTP/1.1") { Ok(Version::Http11) } else if line.starts_with("HTTP/2") { Ok(Version::Http2) } else { Err(HttpError::CouldNotParseResponse) } } /// /// parse headers from libcurl responses /// fn parse_response_headers(&mut self, lines: &[String]) -> Vec<Header> { let mut headers: Vec<Header> = vec![]; for line in lines { if let Some(header) = Header::parse(line.to_string()) { headers.push(header); } } headers } /// /// retrieve an optional location to follow /// You need: /// 1. the option follow_location set to true /// 2. a 3xx response code /// 3. 
a header Location /// fn get_follow_location(&mut self, response: Response) -> Option<String> { if !self.options.follow_location { return None; } let response_code = response.status; if !(300..400).contains(&response_code) { return None; } let location = match get_header_values(response.headers, "Location".to_string()).get(0) { None => return None, Some(value) => value.clone(), }; if location.is_empty() { None } else { Some(location) } } /// /// get cookie storage /// pub fn get_cookie_storage(&mut self) -> Vec<Cookie> { let list = self.handle.cookies().unwrap(); let mut cookies = vec![]; for cookie in list.iter() { let line = str::from_utf8(cookie).unwrap(); if let Ok(cookie) = Cookie::from_str(line) { cookies.push(cookie); } else { eprintln!("warning: line <{}> cannot be parsed as cookie", line); } } cookies } /// /// Add cookie to Cookiejar /// pub fn add_cookie(&mut self, cookie: Cookie) { if self.options.verbose { eprintln!("* add to cookie store: {}", cookie); } self.handle .cookie_list(cookie.to_string().as_str()) .unwrap(); } /// /// Clear cookie storage /// pub fn clear_cookie_storage(&mut self) { if self.options.verbose { eprintln!("* clear cookie storage"); } self.handle.cookie_list("ALL").unwrap(); } /// /// return curl command-line for the http request run by the client /// pub fn curl_command_line(&mut self, http_request: &RequestSpec) -> String { let mut arguments = vec!["curl".to_string()]; arguments.append(&mut http_request.curl_args(self.options.context_dir.clone())); let cookies = all_cookies(self.get_cookie_storage(), http_request); if !cookies.is_empty() { arguments.push("--cookie".to_string()); arguments.push(format!( "'{}'", cookies .iter() .map(|c| c.to_string()) .collect::<Vec<String>>() .join("; ") )); } arguments.append(&mut self.options.curl_args()); arguments.join(" ") } } /// /// return cookies from both the cookie storage and the request /// pub fn all_cookies(cookie_storage: Vec<Cookie>, request: &RequestSpec) -> Vec<RequestCookie> { let mut cookies = request.cookies.clone(); cookies.append( &mut cookie_storage .iter() .filter(|c| c.expires != "1") // cookie expired when libcurl set value to 1? .filter(|c| match_cookie(c, request.url.as_str())) .map(|c| RequestCookie { name: (*c).name.clone(), value: c.value.clone(), }) .collect(), ); cookies } /// /// Match cookie for a given url /// pub fn match_cookie(cookie: &Cookie, url: &str) -> bool { // is it possible to do it with libcurl? let url = Url::parse(url).expect("valid url"); if let Some(domain) = url.domain() { if cookie.include_subdomain == "FALSE" { if cookie.domain != domain { return false; } } else if !domain.ends_with(cookie.domain.as_str()) { return false; } } url.path().starts_with(cookie.path.as_str()) } impl Header { /// /// Parse an http header line received from the server /// It does not panic. 
Just returns None if it cannot be parsed /// pub fn parse(line: String) -> Option<Header> { match line.find(':') { Some(index) => { let (name, value) = line.split_at(index); Some(Header { name: name.to_string().trim().to_string(), value: value[1..].to_string().trim().to_string(), }) } None => None, } } } /// /// Extract hostname from url /// assumes that the url is a valid url /// fn extract_host(url: String) -> String { let url = Url::parse(url.as_str()).expect("valid url"); url.host().expect("valid host").to_string() } /// /// Split an array of bytes into http lines (\r\n separator) /// fn split_lines(data: &[u8]) -> Vec<String> { let mut lines = vec![]; let mut start = 0; let mut i = 0; // `i + 1 < len` (rather than `i < len - 1`) avoids a usize underflow on empty input while i + 1 < data.len() { if data[i] == 13 && data[i + 1] == 10 { if let Ok(s) = str::from_utf8(&data[start..i]) { lines.push(s.to_string()); } start = i + 2; i += 2; } else { i += 1; } } lines } /// /// Optionally decode a header value as text with utf8 or iso-8859-1 encoding /// pub fn decode_header(data: &[u8]) -> Option<String> { match str::from_utf8(data) { Ok(s) => Some(s.to_string()), Err(_) => match ISO_8859_1.decode(data, DecoderTrap::Strict) { Ok(s) => Some(s), Err(_) => { eprintln!("Error decoding header as both utf8 and iso-8859-1 {:?}", data); None } }, } } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_header() { assert_eq!( Header::parse("Foo: Bar\r\n".to_string()).unwrap(), Header { name: "Foo".to_string(), value: "Bar".to_string(), } ); assert_eq!( Header::parse("Location: http://localhost:8000/redirected\r\n".to_string()).unwrap(), Header { name: "Location".to_string(), value: "http://localhost:8000/redirected".to_string(), } ); assert!(Header::parse("Foo".to_string()).is_none()); } #[test] fn test_split_lines_header() { let data = b"GET /hello HTTP/1.1\r\nHost: localhost:8000\r\n\r\n"; let lines = split_lines(data); assert_eq!(lines.len(), 3); assert_eq!(lines.get(0).unwrap().as_str(), "GET /hello HTTP/1.1"); assert_eq!(lines.get(1).unwrap().as_str(), "Host: localhost:8000"); assert_eq!(lines.get(2).unwrap().as_str(), ""); } #[test] fn test_match_cookie() { let cookie = Cookie { domain: "example.com".to_string(), include_subdomain: "FALSE".to_string(), path: "/".to_string(), https: "".to_string(), expires: "".to_string(), name: "".to_string(), value: "".to_string(), http_only: false, }; assert!(match_cookie(&cookie, "http://example.com/toto")); assert!(!match_cookie(&cookie, "http://sub.example.com/tata")); assert!(!match_cookie(&cookie, "http://toto/tata")); let cookie = Cookie { domain: "example.com".to_string(), include_subdomain: "TRUE".to_string(), path: "/toto".to_string(), https: "".to_string(), expires: "".to_string(), name: "".to_string(), value: "".to_string(), http_only: false, }; assert!(match_cookie(&cookie, "http://example.com/toto")); assert!(match_cookie(&cookie, "http://sub.example.com/toto")); assert!(!match_cookie(&cookie, "http://example.com/tata")); } }
31.729577
99
0.49454
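The header-splitting helpers in the file above can be exercised on their own. Below is a minimal standalone sketch (not hurl's code; `split_crlf` is a made-up name) of the same \r\n splitting built on the standard library; unlike the manual index loop it keeps a trailing empty element after a final \r\n, so it is illustrative rather than byte-for-byte equivalent.

fn split_crlf(data: &[u8]) -> Vec<String> {
    data.split(|b| *b == b'\n')
        // drop the '\r' that precedes each '\n' in an http line ending
        .map(|line| line.strip_suffix(b"\r").unwrap_or(line))
        .filter_map(|line| std::str::from_utf8(line).ok())
        .map(str::to_string)
        .collect()
}

fn main() {
    let raw = b"GET /hello HTTP/1.1\r\nHost: localhost:8000\r\n\r\n";
    for line in split_crlf(raw) {
        println!("{:?}", line);
    }
}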
ebf5898b9f7d078b8642f5db80cbaf4b7d96b3b7
247
#![feature(asm, lang_items)] #[lang = "panic_fmt"] #[no_mangle] pub extern fn panic_fmt() -> ! { loop{} } mod syscall; use syscall::*; #[no_mangle] pub extern "C" fn kmain() { for i in 0..1000 { sys_print(i); } sys_exit(); }
17.642857
76
0.57085
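The kernel snippet above targets a pre-1.30 nightly: the `panic_fmt` lang item has since been removed in favor of a stable attribute. A sketch of how the same panic hook reads on current Rust (the `syscall` module and `kmain` are assumed unchanged):

#![no_std]
#![no_main]

use core::panic::PanicInfo;

// Replaces the old `panic_fmt` lang item; stable since Rust 1.30.
#[panic_handler]
fn panic(_info: &PanicInfo) -> ! {
    loop {}
}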
f49d9b35dd4a70c451335410b79fdb8cbf624bc9
10,058
use super::*; use crate::core::{ reconciler::PollTriggerEvent, registry::Registry, specs::ResourceSpecsLocked, wrapper::NodeWrapper, }; use common::{ errors::{GrpcRequestError, SvcError}, v0::msg_translation::RpcToMessageBus, }; use common_lib::types::v0::message_bus::{ Filter, GetSpecs, Node, NodeId, NodeState, NodeStatus, Specs, States, }; use crate::core::wrapper::InternalOps; use rpc::mayastor::ListBlockDevicesRequest; use snafu::ResultExt; use std::{collections::HashMap, sync::Arc}; /// Node's Service #[derive(Debug, Clone)] pub(crate) struct Service { registry: Registry, /// deadline for receiving keepalive/Register messages deadline: std::time::Duration, /// node communication timeouts comms_timeouts: NodeCommsTimeout, } /// Node communication Timeouts for establishing the connection to a node and /// the request itself #[derive(Debug, Clone)] pub(crate) struct NodeCommsTimeout { /// node gRPC connection timeout connect: std::time::Duration, /// gRPC request timeout request: std::time::Duration, } impl NodeCommsTimeout { /// return a new `Self` with the connect and request timeouts pub(crate) fn new(connect: std::time::Duration, request: std::time::Duration) -> Self { Self { connect, request } } /// timeout to establish connection to the node pub fn connect(&self) -> std::time::Duration { self.connect } /// timeout for the request itself pub fn request(&self) -> std::time::Duration { self.request } } impl Service { /// New Node Service which uses the `registry` as its node cache and sets /// the `deadline` to each node's watchdog pub(super) async fn new( registry: Registry, deadline: std::time::Duration, request: std::time::Duration, connect: std::time::Duration, ) -> Self { let service = Self { registry, deadline, comms_timeouts: NodeCommsTimeout::new(connect, request), }; // attempt to reload the node state based on the specification for node in service.registry.specs().get_nodes() { service .register_state( &Register { id: node.id().clone(), grpc_endpoint: node.endpoint().to_string(), }, true, ) .await; } service } fn specs(&self) -> &ResourceSpecsLocked { self.registry.specs() } /// Callback to be called when a node's watchdog times out pub(super) async fn on_timeout(service: &Service, id: &NodeId) { let registry = service.registry.clone(); let state = registry.nodes().read().await; if let Some(node) = state.get(id) { let mut node = node.write().await; if node.is_online() { node.update_liveness().await; } } } /// Register a new node through the register information pub(super) async fn register(&self, registration: &Register) { self.registry.register_node_spec(registration).await; self.register_state(registration, false).await; } /// Attempt to Register a new node state through the register information. /// todo: if we enable concurrent registrations when we move to gRPC, we'll want /// to make sure we don't process registrations for the same node in parallel. 
pub(super) async fn register_state(&self, registration: &Register, startup: bool) { let node_state = NodeState { id: registration.id.clone(), grpc_endpoint: registration.grpc_endpoint.clone(), status: NodeStatus::Online, }; let nodes = self.registry.nodes(); let node = nodes.write().await.get_mut(&node_state.id).cloned(); let send_event = match node { None => { let mut node = NodeWrapper::new(&node_state, self.deadline, self.comms_timeouts.clone()); let mut result = node.liveness_probe().await; if result.is_ok() { result = node.load().await; } match result { Ok(_) => { let mut nodes = self.registry.nodes().write().await; if nodes.get_mut(&node_state.id).is_none() { node.watchdog_mut().arm(self.clone()); let node = Arc::new(tokio::sync::RwLock::new(node)); nodes.insert(node_state.id().clone(), node); true } else { false } } Err(error) => { tracing::warn!( node = %node_state.id(), error = %error, "Failed to register node" ); false } } } Some(node) => matches!(node.on_register().await, Ok(true)), }; // don't send these events on startup as the reconciler will start working afterwards anyway if send_event && !startup { self.registry .notify(PollTriggerEvent::NodeStateChangeOnline) .await; } } /// Deregister a node through the deregister information pub(super) async fn deregister(&self, node: &Deregister) { let nodes = self.registry.nodes().read().await; match nodes.get(&node.id) { None => {} // ideally we want this node to disappear completely when it's not // part of the daemonset, but we just don't have that kind of // information at this level :( // maybe nodes should also be registered/deregistered via REST? Some(node) => { node.write().await.set_status(NodeStatus::Unknown); } } } /// Get nodes by filter pub(crate) async fn get_nodes(&self, request: &GetNodes) -> Result<Nodes, SvcError> { match request.filter() { Filter::None => { let node_states = self.registry.get_node_states().await; let node_specs = self.specs().get_nodes(); let mut nodes = HashMap::new(); node_states.into_iter().for_each(|state| { let spec = node_specs.iter().find(|s| s.id() == &state.id); nodes.insert( state.id.clone(), Node::new(state.id.clone(), spec.cloned(), Some(state)), ); }); node_specs.into_iter().for_each(|spec| { if nodes.get(spec.id()).is_none() { nodes.insert( spec.id().clone(), Node::new(spec.id().clone(), Some(spec), None), ); } }); Ok(Nodes(nodes.values().cloned().collect())) } Filter::Node(node_id) => { let node_state = self.registry.get_node_state(node_id).await.ok(); let node_spec = self.specs().get_node(node_id).ok(); if node_state.is_none() && node_spec.is_none() { Err(SvcError::NodeNotFound { node_id: node_id.to_owned(), }) } else { Ok(Nodes(vec![Node::new( node_id.clone(), node_spec, node_state, )])) } } _ => Err(SvcError::InvalidFilter { filter: request.filter().clone(), }), } } /// Get block devices from a node pub(crate) async fn get_block_devices( &self, request: &GetBlockDevices, ) -> Result<BlockDevices, SvcError> { let node = self.registry.get_node_wrapper(&request.node).await?; let grpc = node.read().await.grpc_context()?; let mut client = grpc.connect().await?; let result = client .mayastor .list_block_devices(ListBlockDevicesRequest { all: request.all }) .await; let response = result .context(GrpcRequestError { resource: ResourceKind::Block, request: "list_block_devices", })? 
.into_inner(); let bdevs = response .devices .iter() .map(|rpc_bdev| rpc_bdev.to_mbus()) .collect(); Ok(BlockDevices(bdevs)) } /// Get specs from the registry pub(crate) async fn get_specs(&self, _request: &GetSpecs) -> Result<Specs, SvcError> { let specs = self.specs().write(); Ok(Specs { volumes: specs.get_volumes(), nexuses: specs.get_nexuses(), replicas: specs.get_replicas(), pools: specs.get_pools(), }) } /// Get state information for all resources. pub(crate) async fn get_states(&self, _request: &GetStates) -> Result<States, SvcError> { let mut nexuses = vec![]; let mut pools = vec![]; let mut replicas = vec![]; // Aggregate the state information from each node. let nodes = self.registry.nodes().read().await; for (_node_id, locked_node_wrapper) in nodes.iter() { let node_wrapper = locked_node_wrapper.read().await; nexuses.extend(node_wrapper.nexus_states()); pools.extend(node_wrapper.pool_states()); replicas.extend(node_wrapper.replica_states()); } Ok(States { nexuses, pools, replicas, }) } }
35.540636
100
0.525651
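`register_state` above probes a node without holding the registry lock and then re-checks the map under the write lock before inserting, so two concurrent registrations of the same node cannot both insert. An illustrative sketch of that pattern with hypothetical types (a plain `String` id and `u32` payload, assuming a tokio runtime), not the service's own code:

use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;

type Nodes = Arc<RwLock<HashMap<String, u32>>>;

// Returns true only for the registration that actually inserted the node.
async fn insert_if_absent(nodes: &Nodes, id: &str, value: u32) -> bool {
    let mut guard = nodes.write().await;
    if guard.contains_key(id) {
        false // a concurrent registration won the race
    } else {
        guard.insert(id.to_string(), value);
        true
    }
}

#[tokio::main]
async fn main() {
    let nodes: Nodes = Arc::new(RwLock::new(HashMap::new()));
    assert!(insert_if_absent(&nodes, "node-1", 1).await);
    assert!(!insert_if_absent(&nodes, "node-1", 2).await);
}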
e6360cccf3b96d9dfb35276b3fa78b1ac7092680
50,209
//! A bunch of methods and structures more or less related to resolving macros and //! interface provided by `Resolver` to macro expander. use crate::imports::ImportResolver; use crate::Namespace::*; use crate::{AmbiguityError, AmbiguityErrorMisc, AmbiguityKind, BuiltinMacroState, Determinacy}; use crate::{CrateLint, ParentScope, ResolutionError, Resolver, Scope, ScopeSet, Weak}; use crate::{ModuleKind, ModuleOrUniformRoot, NameBinding, PathResult, Segment, ToNameBinding}; use rustc_ast::{self as ast, NodeId}; use rustc_ast_lowering::ResolverAstLowering; use rustc_ast_pretty::pprust; use rustc_attr::StabilityLevel; use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::ptr_key::PtrKey; use rustc_data_structures::sync::Lrc; use rustc_errors::struct_span_err; use rustc_expand::base::{Indeterminate, InvocationRes, ResolverExpand, SyntaxExtension}; use rustc_expand::compile_declarative_macro; use rustc_expand::expand::{AstFragment, Invocation, InvocationKind}; use rustc_feature::is_builtin_attr_name; use rustc_hir::def::{self, DefKind, NonMacroAttrKind}; use rustc_hir::def_id; use rustc_middle::middle::stability; use rustc_middle::ty; use rustc_session::lint::builtin::{SOFT_UNSTABLE, UNUSED_MACROS}; use rustc_session::parse::feature_err; use rustc_session::Session; use rustc_span::edition::Edition; use rustc_span::hygiene::{self, ExpnData, ExpnId, ExpnKind}; use rustc_span::hygiene::{AstPass, MacroKind}; use rustc_span::symbol::{kw, sym, Ident, Symbol}; use rustc_span::{Span, DUMMY_SP}; use std::cell::Cell; use std::{mem, ptr}; type Res = def::Res<NodeId>; /// Binding produced by a `macro_rules` item. /// Not modularized, can shadow previous `macro_rules` bindings, etc. #[derive(Debug)] pub struct MacroRulesBinding<'a> { crate binding: &'a NameBinding<'a>, /// `macro_rules` scope into which the `macro_rules` item was planted. crate parent_macro_rules_scope: MacroRulesScopeRef<'a>, crate ident: Ident, } /// The scope introduced by a `macro_rules!` macro. /// This starts at the macro's definition and ends at the end of the macro's parent /// module (named or unnamed), or even further if it escapes with `#[macro_use]`. /// Some macro invocations need to introduce `macro_rules` scopes too because they /// can potentially expand into macro definitions. #[derive(Copy, Clone, Debug)] pub enum MacroRulesScope<'a> { /// Empty "root" scope at the crate start containing no names. Empty, /// The scope introduced by a `macro_rules!` macro definition. Binding(&'a MacroRulesBinding<'a>), /// The scope introduced by a macro invocation that can potentially /// create a `macro_rules!` macro definition. Invocation(ExpnId), } /// `macro_rules!` scopes are always kept by reference and inside a cell. /// The reason is that we update scopes with value `MacroRulesScope::Invocation(invoc_id)` /// in-place after `invoc_id` gets expanded. /// This helps to avoid uncontrollable growth of `macro_rules!` scope chains, /// which usually grow linearly with the number of macro invocations /// in a module (including derives) and hurt performance. pub(crate) type MacroRulesScopeRef<'a> = PtrKey<'a, Cell<MacroRulesScope<'a>>>; // Macro namespace is separated into two sub-namespaces, one for bang macros and // one for attribute-like macros (attributes, derives). // We ignore resolutions from one sub-namespace when searching names in scope for another. 
fn sub_namespace_match(candidate: Option<MacroKind>, requirement: Option<MacroKind>) -> bool { #[derive(PartialEq)] enum SubNS { Bang, AttrLike, } let sub_ns = |kind| match kind { MacroKind::Bang => SubNS::Bang, MacroKind::Attr | MacroKind::Derive => SubNS::AttrLike, }; let candidate = candidate.map(sub_ns); let requirement = requirement.map(sub_ns); // "No specific sub-namespace" means "matches anything" for both requirements and candidates. candidate.is_none() || requirement.is_none() || candidate == requirement } // We don't want to format a path using pretty-printing, // `format!("{}", path)`, because that tries to insert // line-breaks and is slow. fn fast_print_path(path: &ast::Path) -> Symbol { if path.segments.len() == 1 { path.segments[0].ident.name } else { let mut path_str = String::with_capacity(64); for (i, segment) in path.segments.iter().enumerate() { if i != 0 { path_str.push_str("::"); } if segment.ident.name != kw::PathRoot { path_str.push_str(&segment.ident.as_str()) } } Symbol::intern(&path_str) } } /// The code common between processing `#![register_tool]` and `#![register_attr]`. fn registered_idents( sess: &Session, attrs: &[ast::Attribute], attr_name: Symbol, descr: &str, ) -> FxHashSet<Ident> { let mut registered = FxHashSet::default(); for attr in sess.filter_by_name(attrs, attr_name) { for nested_meta in attr.meta_item_list().unwrap_or_default() { match nested_meta.ident() { Some(ident) => { if let Some(old_ident) = registered.replace(ident) { let msg = format!("{} `{}` was already registered", descr, ident); sess.struct_span_err(ident.span, &msg) .span_label(old_ident.span, "already registered here") .emit(); } } None => { let msg = format!("`{}` only accepts identifiers", attr_name); let span = nested_meta.span(); sess.struct_span_err(span, &msg).span_label(span, "not an identifier").emit(); } } } } registered } crate fn registered_attrs_and_tools( sess: &Session, attrs: &[ast::Attribute], ) -> (FxHashSet<Ident>, FxHashSet<Ident>) { let registered_attrs = registered_idents(sess, attrs, sym::register_attr, "attribute"); let mut registered_tools = registered_idents(sess, attrs, sym::register_tool, "tool"); // We implicitly add `rustfmt` and `clippy` to known tools, // but it's not an error to register them explicitly. let predefined_tools = [sym::clippy, sym::rustfmt]; registered_tools.extend(predefined_tools.iter().cloned().map(Ident::with_dummy_span)); (registered_attrs, registered_tools) } impl<'a> ResolverExpand for Resolver<'a> { fn next_node_id(&mut self) -> NodeId { self.next_node_id() } fn resolve_dollar_crates(&mut self) { hygiene::update_dollar_crate_names(|ctxt| { let ident = Ident::new(kw::DollarCrate, DUMMY_SP.with_ctxt(ctxt)); match self.resolve_crate_root(ident).kind { ModuleKind::Def(.., name) if name != kw::Empty => name, _ => kw::Crate, } }); } fn visit_ast_fragment_with_placeholders(&mut self, expansion: ExpnId, fragment: &AstFragment) { // Integrate the new AST fragment into all the definition and module structures. // We are inside the `expansion` now, but other parent scope components are still the same. 
let parent_scope = ParentScope { expansion, ..self.invocation_parent_scopes[&expansion] }; let output_macro_rules_scope = self.build_reduced_graph(fragment, parent_scope); self.output_macro_rules_scopes.insert(expansion, output_macro_rules_scope); parent_scope.module.unexpanded_invocations.borrow_mut().remove(&expansion); } fn register_builtin_macro(&mut self, ident: Ident, ext: SyntaxExtension) { if self.builtin_macros.insert(ident.name, BuiltinMacroState::NotYetSeen(ext)).is_some() { self.session .span_err(ident.span, &format!("built-in macro `{}` was already defined", ident)); } } // Create a new Expansion with a definition site of the provided module, or // a fake empty `#[no_implicit_prelude]` module if no module is provided. fn expansion_for_ast_pass( &mut self, call_site: Span, pass: AstPass, features: &[Symbol], parent_module_id: Option<NodeId>, ) -> ExpnId { let expn_id = ExpnId::fresh(Some(ExpnData::allow_unstable( ExpnKind::AstPass(pass), call_site, self.session.edition(), features.into(), None, ))); let parent_scope = if let Some(module_id) = parent_module_id { let parent_def_id = self.local_def_id(module_id); self.definitions.add_parent_module_of_macro_def(expn_id, parent_def_id.to_def_id()); self.module_map[&parent_def_id] } else { self.definitions.add_parent_module_of_macro_def( expn_id, def_id::DefId::local(def_id::CRATE_DEF_INDEX), ); self.empty_module }; self.ast_transform_scopes.insert(expn_id, parent_scope); expn_id } fn resolve_imports(&mut self) { ImportResolver { r: self }.resolve_imports() } fn resolve_macro_invocation( &mut self, invoc: &Invocation, eager_expansion_root: ExpnId, force: bool, ) -> Result<InvocationRes, Indeterminate> { let invoc_id = invoc.expansion_data.id; let parent_scope = match self.invocation_parent_scopes.get(&invoc_id) { Some(parent_scope) => *parent_scope, None => { // If there's no entry in the table, then we are resolving an eagerly expanded // macro, which should inherit its parent scope from its eager expansion root - // the macro that requested this eager expansion. let parent_scope = *self .invocation_parent_scopes .get(&eager_expansion_root) .expect("non-eager expansion without a parent scope"); self.invocation_parent_scopes.insert(invoc_id, parent_scope); parent_scope } }; let (path, kind, inner_attr, derives, after_derive) = match invoc.kind { InvocationKind::Attr { ref attr, ref derives, after_derive, .. } => ( &attr.get_normal_item().path, MacroKind::Attr, attr.style == ast::AttrStyle::Inner, self.arenas.alloc_ast_paths(derives), after_derive, ), InvocationKind::Bang { ref mac, .. } => { (&mac.path, MacroKind::Bang, false, &[][..], false) } InvocationKind::Derive { ref path, .. } => { (path, MacroKind::Derive, false, &[][..], false) } InvocationKind::DeriveContainer { ref derives, .. } => { // Block expansion of the container until we resolve all derives in it. // This is required for two reasons: // - Derive helper attributes are in scope for the item to which the `#[derive]` // is applied, so they have to be produced by the container's expansion rather // than by individual derives. // - Derives in the container need to know whether one of them is a built-in `Copy`. // FIXME: Try to avoid repeated resolutions for derives here and in expansion. 
let mut exts = Vec::new(); let mut helper_attrs = Vec::new(); for path in derives { exts.push( match self.resolve_macro_path( path, Some(MacroKind::Derive), &parent_scope, true, force, ) { Ok((Some(ext), _)) => { let span = path .segments .last() .unwrap() .ident .span .normalize_to_macros_2_0(); helper_attrs.extend( ext.helper_attrs.iter().map(|name| Ident::new(*name, span)), ); if ext.is_derive_copy { self.containers_deriving_copy.insert(invoc_id); } ext } Ok(_) | Err(Determinacy::Determined) => { self.dummy_ext(MacroKind::Derive) } Err(Determinacy::Undetermined) => return Err(Indeterminate), }, ) } self.helper_attrs.insert(invoc_id, helper_attrs); return Ok(InvocationRes::DeriveContainer(exts)); } }; // Derives are not included when `invocations` are collected, so we have to add them here. let parent_scope = &ParentScope { derives, ..parent_scope }; let require_inert = !invoc.fragment_kind.supports_macro_expansion(); let node_id = self.lint_node_id(eager_expansion_root); let (ext, res) = self.smart_resolve_macro_path( path, kind, require_inert, inner_attr, parent_scope, node_id, force, )?; let span = invoc.span(); invoc_id.set_expn_data(ext.expn_data( parent_scope.expansion, span, fast_print_path(path), res.opt_def_id(), )); if let Res::Def(_, _) = res { if after_derive { self.session.span_err(span, "macro attributes must be placed before `#[derive]`"); } let normal_module_def_id = self.macro_def_scope(invoc_id).nearest_parent_mod; self.definitions.add_parent_module_of_macro_def(invoc_id, normal_module_def_id); } Ok(InvocationRes::Single(ext)) } fn check_unused_macros(&mut self) { for (_, &(node_id, span)) in self.unused_macros.iter() { self.lint_buffer.buffer_lint(UNUSED_MACROS, node_id, span, "unused macro definition"); } } fn lint_node_id(&mut self, expn_id: ExpnId) -> NodeId { self.invocation_parents .get(&expn_id) .map_or(ast::CRATE_NODE_ID, |id| self.def_id_to_node_id[*id]) } fn has_derive_copy(&self, expn_id: ExpnId) -> bool { self.containers_deriving_copy.contains(&expn_id) } // The function that implements the resolution logic of `#[cfg_accessible(path)]`. // Returns true if the path can certainly be resolved in one of three namespaces, // returns false if the path certainly cannot be resolved in any of the three namespaces. // Returns `Indeterminate` if we cannot give a certain answer yet. fn cfg_accessible(&mut self, expn_id: ExpnId, path: &ast::Path) -> Result<bool, Indeterminate> { let span = path.span; let path = &Segment::from_path(path); let parent_scope = self.invocation_parent_scopes[&expn_id]; let mut indeterminate = false; for ns in [TypeNS, ValueNS, MacroNS].iter().copied() { match self.resolve_path(path, Some(ns), &parent_scope, false, span, CrateLint::No) { PathResult::Module(ModuleOrUniformRoot::Module(_)) => return Ok(true), PathResult::NonModule(partial_res) if partial_res.unresolved_segments() == 0 => { return Ok(true); } PathResult::Indeterminate => indeterminate = true, // FIXME: `resolve_path` is not ready to report partially resolved paths // correctly, so we just report an error if the path was reported as unresolved. // This needs to be fixed for `cfg_accessible` to be useful. PathResult::NonModule(..) | PathResult::Failed { .. 
} => {} PathResult::Module(_) => panic!("unexpected path resolution"), } } if indeterminate { return Err(Indeterminate); } self.session .struct_span_err(span, "not sure whether the path is accessible or not") .span_note(span, "`cfg_accessible` is not fully implemented") .emit(); Ok(false) } } impl<'a> Resolver<'a> { /// Resolve macro path with error reporting and recovery. /// Uses dummy syntax extensions for unresolved macros or macros with unexpected resolutions /// for better error recovery. fn smart_resolve_macro_path( &mut self, path: &ast::Path, kind: MacroKind, require_inert: bool, inner_attr: bool, parent_scope: &ParentScope<'a>, node_id: NodeId, force: bool, ) -> Result<(Lrc<SyntaxExtension>, Res), Indeterminate> { let (ext, res) = match self.resolve_macro_path(path, Some(kind), parent_scope, true, force) { Ok((Some(ext), res)) => (ext, res), Ok((None, res)) => (self.dummy_ext(kind), res), Err(Determinacy::Determined) => (self.dummy_ext(kind), Res::Err), Err(Determinacy::Undetermined) => return Err(Indeterminate), }; // Report errors for the resolved macro. for segment in &path.segments { if let Some(args) = &segment.args { self.session.span_err(args.span(), "generic arguments in macro path"); } if kind == MacroKind::Attr && segment.ident.as_str().starts_with("rustc") { self.session.span_err( segment.ident.span, "attributes starting with `rustc` are reserved for use by the `rustc` compiler", ); } } match res { Res::Def(DefKind::Macro(_), def_id) => { if let Some(def_id) = def_id.as_local() { self.unused_macros.remove(&def_id); if self.proc_macro_stubs.contains(&def_id) { self.session.span_err( path.span, "can't use a procedural macro from the same crate that defines it", ); } } } Res::NonMacroAttr(..) | Res::Err => {} _ => panic!("expected `DefKind::Macro` or `Res::NonMacroAttr`"), }; self.check_stability_and_deprecation(&ext, path, node_id); let unexpected_res = if ext.macro_kind() != kind { Some((kind.article(), kind.descr_expected())) } else if require_inert && matches!(res, Res::Def(..)) { Some(("a", "non-macro attribute")) } else { None }; if let Some((article, expected)) = unexpected_res { let path_str = pprust::path_to_string(path); let msg = format!("expected {}, found {} `{}`", expected, res.descr(), path_str); self.session .struct_span_err(path.span, &msg) .span_label(path.span, format!("not {} {}", article, expected)) .emit(); return Ok((self.dummy_ext(kind), Res::Err)); } // We are trying to avoid reporting this error if other related errors were reported. if res != Res::Err && inner_attr && !self.session.features_untracked().custom_inner_attributes { let msg = match res { Res::Def(..) => "inner macro attributes are unstable", Res::NonMacroAttr(..) 
=> "custom inner attributes are unstable", _ => unreachable!(), }; if path == &sym::test { self.session.parse_sess.buffer_lint(SOFT_UNSTABLE, path.span, node_id, msg); } else { feature_err(&self.session.parse_sess, sym::custom_inner_attributes, path.span, msg) .emit(); } } Ok((ext, res)) } pub fn resolve_macro_path( &mut self, path: &ast::Path, kind: Option<MacroKind>, parent_scope: &ParentScope<'a>, trace: bool, force: bool, ) -> Result<(Option<Lrc<SyntaxExtension>>, Res), Determinacy> { let path_span = path.span; let mut path = Segment::from_path(path); // Possibly apply the macro helper hack if kind == Some(MacroKind::Bang) && path.len() == 1 && path[0].ident.span.ctxt().outer_expn_data().local_inner_macros { let root = Ident::new(kw::DollarCrate, path[0].ident.span); path.insert(0, Segment::from_ident(root)); } let res = if path.len() > 1 { let res = match self.resolve_path( &path, Some(MacroNS), parent_scope, false, path_span, CrateLint::No, ) { PathResult::NonModule(path_res) if path_res.unresolved_segments() == 0 => { Ok(path_res.base_res()) } PathResult::Indeterminate if !force => return Err(Determinacy::Undetermined), PathResult::NonModule(..) | PathResult::Indeterminate | PathResult::Failed { .. } => Err(Determinacy::Determined), PathResult::Module(..) => unreachable!(), }; if trace { let kind = kind.expect("macro kind must be specified if tracing is enabled"); self.multi_segment_macro_resolutions.push(( path, path_span, kind, *parent_scope, res.ok(), )); } self.prohibit_imported_non_macro_attrs(None, res.ok(), path_span); res } else { let scope_set = kind.map_or(ScopeSet::All(MacroNS, false), ScopeSet::Macro); let binding = self.early_resolve_ident_in_lexical_scope( path[0].ident, scope_set, parent_scope, false, force, path_span, ); if let Err(Determinacy::Undetermined) = binding { return Err(Determinacy::Undetermined); } if trace { let kind = kind.expect("macro kind must be specified if tracing is enabled"); self.single_segment_macro_resolutions.push(( path[0].ident, kind, *parent_scope, binding.ok(), )); } let res = binding.map(|binding| binding.res()); self.prohibit_imported_non_macro_attrs(binding.ok(), res.ok(), path_span); res }; res.map(|res| (self.get_macro(res), res)) } // Resolve an identifier in lexical scope. // This is a variation of `fn resolve_ident_in_lexical_scope` that can be run during // expansion and import resolution (perhaps they can be merged in the future). // The function is used for resolving initial segments of macro paths (e.g., `foo` in // `foo::bar!(); or `foo!();`) and also for import paths on 2018 edition. crate fn early_resolve_ident_in_lexical_scope( &mut self, orig_ident: Ident, scope_set: ScopeSet, parent_scope: &ParentScope<'a>, record_used: bool, force: bool, path_span: Span, ) -> Result<&'a NameBinding<'a>, Determinacy> { bitflags::bitflags! { struct Flags: u8 { const MACRO_RULES = 1 << 0; const MODULE = 1 << 1; const MISC_SUGGEST_CRATE = 1 << 2; const MISC_SUGGEST_SELF = 1 << 3; const MISC_FROM_PRELUDE = 1 << 4; } } assert!(force || !record_used); // `record_used` implies `force` // Make sure `self`, `super` etc produce an error when passed to here. if orig_ident.is_path_segment_keyword() { return Err(Determinacy::Determined); } let (ns, macro_kind, is_import) = match scope_set { ScopeSet::All(ns, is_import) => (ns, None, is_import), ScopeSet::AbsolutePath(ns) => (ns, None, false), ScopeSet::Macro(macro_kind) => (MacroNS, Some(macro_kind), false), }; // This is *the* result, resolution from the scope closest to the resolved identifier. 
// However, sometimes this result is "weak" because it comes from a glob import or // a macro expansion, and in this case it cannot shadow names from outer scopes, e.g. // mod m { ... } // solution in outer scope // { // use prefix::*; // imports another `m` - innermost solution // // weak, cannot shadow the outer `m`, need to report ambiguity error // m::mac!(); // } // So we have to save the innermost solution and continue searching in outer scopes // to detect potential ambiguities. let mut innermost_result: Option<(&NameBinding<'_>, Flags)> = None; let mut determinacy = Determinacy::Determined; // Go through all the scopes and try to resolve the name. let break_result = self.visit_scopes( scope_set, parent_scope, orig_ident, |this, scope, use_prelude, ident| { let ok = |res, span, arenas| { Ok(( (res, ty::Visibility::Public, span, ExpnId::root()).to_name_binding(arenas), Flags::empty(), )) }; let result = match scope { Scope::DeriveHelpers(expn_id) => { if let Some(attr) = this .helper_attrs .get(&expn_id) .and_then(|attrs| attrs.iter().rfind(|i| ident == **i)) { let binding = ( Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper), ty::Visibility::Public, attr.span, expn_id, ) .to_name_binding(this.arenas); Ok((binding, Flags::empty())) } else { Err(Determinacy::Determined) } } Scope::DeriveHelpersCompat => { let mut result = Err(Determinacy::Determined); for derive in parent_scope.derives { let parent_scope = &ParentScope { derives: &[], ..*parent_scope }; match this.resolve_macro_path( derive, Some(MacroKind::Derive), parent_scope, true, force, ) { Ok((Some(ext), _)) => { if ext.helper_attrs.contains(&ident.name) { result = ok( Res::NonMacroAttr(NonMacroAttrKind::DeriveHelperCompat), derive.span, this.arenas, ); break; } } Ok(_) | Err(Determinacy::Determined) => {} Err(Determinacy::Undetermined) => { result = Err(Determinacy::Undetermined) } } } result } Scope::MacroRules(macro_rules_scope) => match macro_rules_scope.get() { MacroRulesScope::Binding(macro_rules_binding) if ident == macro_rules_binding.ident => { Ok((macro_rules_binding.binding, Flags::MACRO_RULES)) } MacroRulesScope::Invocation(_) => Err(Determinacy::Undetermined), _ => Err(Determinacy::Determined), }, Scope::CrateRoot => { let root_ident = Ident::new(kw::PathRoot, ident.span); let root_module = this.resolve_crate_root(root_ident); let binding = this.resolve_ident_in_module_ext( ModuleOrUniformRoot::Module(root_module), ident, ns, parent_scope, record_used, path_span, ); match binding { Ok(binding) => Ok((binding, Flags::MODULE | Flags::MISC_SUGGEST_CRATE)), Err((Determinacy::Undetermined, Weak::No)) => { return Some(Err(Determinacy::determined(force))); } Err((Determinacy::Undetermined, Weak::Yes)) => { Err(Determinacy::Undetermined) } Err((Determinacy::Determined, _)) => Err(Determinacy::Determined), } } Scope::Module(module) => { let adjusted_parent_scope = &ParentScope { module, ..*parent_scope }; let binding = this.resolve_ident_in_module_unadjusted_ext( ModuleOrUniformRoot::Module(module), ident, ns, adjusted_parent_scope, true, record_used, path_span, ); match binding { Ok(binding) => { let misc_flags = if ptr::eq(module, this.graph_root) { Flags::MISC_SUGGEST_CRATE } else if module.is_normal() { Flags::MISC_SUGGEST_SELF } else { Flags::empty() }; Ok((binding, Flags::MODULE | misc_flags)) } Err((Determinacy::Undetermined, Weak::No)) => { return Some(Err(Determinacy::determined(force))); } Err((Determinacy::Undetermined, Weak::Yes)) => { Err(Determinacy::Undetermined) } Err((Determinacy::Determined, _)) => 
Err(Determinacy::Determined), } } Scope::RegisteredAttrs => match this.registered_attrs.get(&ident).cloned() { Some(ident) => ok( Res::NonMacroAttr(NonMacroAttrKind::Registered), ident.span, this.arenas, ), None => Err(Determinacy::Determined), }, Scope::MacroUsePrelude => { match this.macro_use_prelude.get(&ident.name).cloned() { Some(binding) => Ok((binding, Flags::MISC_FROM_PRELUDE)), None => Err(Determinacy::determined( this.graph_root.unexpanded_invocations.borrow().is_empty(), )), } } Scope::BuiltinAttrs => { if is_builtin_attr_name(ident.name) { ok(Res::NonMacroAttr(NonMacroAttrKind::Builtin), DUMMY_SP, this.arenas) } else { Err(Determinacy::Determined) } } Scope::ExternPrelude => match this.extern_prelude_get(ident, !record_used) { Some(binding) => Ok((binding, Flags::empty())), None => Err(Determinacy::determined( this.graph_root.unexpanded_invocations.borrow().is_empty(), )), }, Scope::ToolPrelude => match this.registered_tools.get(&ident).cloned() { Some(ident) => ok(Res::ToolMod, ident.span, this.arenas), None => Err(Determinacy::Determined), }, Scope::StdLibPrelude => { let mut result = Err(Determinacy::Determined); if let Some(prelude) = this.prelude { if let Ok(binding) = this.resolve_ident_in_module_unadjusted( ModuleOrUniformRoot::Module(prelude), ident, ns, parent_scope, false, path_span, ) { if use_prelude || this.is_builtin_macro(binding.res()) { result = Ok((binding, Flags::MISC_FROM_PRELUDE)); } } } result } Scope::BuiltinTypes => { match this.primitive_type_table.primitive_types.get(&ident.name).cloned() { Some(prim_ty) => ok(Res::PrimTy(prim_ty), DUMMY_SP, this.arenas), None => Err(Determinacy::Determined), } } }; match result { Ok((binding, flags)) if sub_namespace_match(binding.macro_kind(), macro_kind) => { if !record_used { return Some(Ok(binding)); } if let Some((innermost_binding, innermost_flags)) = innermost_result { // Found another solution, if the first one was "weak", report an error. 
let (res, innermost_res) = (binding.res(), innermost_binding.res()); if res != innermost_res { let builtin = Res::NonMacroAttr(NonMacroAttrKind::Builtin); let derive_helper_compat = Res::NonMacroAttr(NonMacroAttrKind::DeriveHelperCompat); let ambiguity_error_kind = if is_import { Some(AmbiguityKind::Import) } else if innermost_res == builtin || res == builtin { Some(AmbiguityKind::BuiltinAttr) } else if innermost_res == derive_helper_compat || res == derive_helper_compat { Some(AmbiguityKind::DeriveHelper) } else if innermost_flags.contains(Flags::MACRO_RULES) && flags.contains(Flags::MODULE) && !this.disambiguate_macro_rules_vs_modularized( innermost_binding, binding, ) || flags.contains(Flags::MACRO_RULES) && innermost_flags.contains(Flags::MODULE) && !this.disambiguate_macro_rules_vs_modularized( binding, innermost_binding, ) { Some(AmbiguityKind::MacroRulesVsModularized) } else if innermost_binding.is_glob_import() { Some(AmbiguityKind::GlobVsOuter) } else if innermost_binding .may_appear_after(parent_scope.expansion, binding) { Some(AmbiguityKind::MoreExpandedVsOuter) } else { None }; if let Some(kind) = ambiguity_error_kind { let misc = |f: Flags| { if f.contains(Flags::MISC_SUGGEST_CRATE) { AmbiguityErrorMisc::SuggestCrate } else if f.contains(Flags::MISC_SUGGEST_SELF) { AmbiguityErrorMisc::SuggestSelf } else if f.contains(Flags::MISC_FROM_PRELUDE) { AmbiguityErrorMisc::FromPrelude } else { AmbiguityErrorMisc::None } }; this.ambiguity_errors.push(AmbiguityError { kind, ident: orig_ident, b1: innermost_binding, b2: binding, misc1: misc(innermost_flags), misc2: misc(flags), }); return Some(Ok(innermost_binding)); } } } else { // Found the first solution. innermost_result = Some((binding, flags)); } } Ok(..) | Err(Determinacy::Determined) => {} Err(Determinacy::Undetermined) => determinacy = Determinacy::Undetermined, } None }, ); if let Some(break_result) = break_result { return break_result; } // The first found solution was the only one, return it. if let Some((binding, _)) = innermost_result { return Ok(binding); } Err(Determinacy::determined(determinacy == Determinacy::Determined || force)) } crate fn finalize_macro_resolutions(&mut self) { let check_consistency = |this: &mut Self, path: &[Segment], span, kind: MacroKind, initial_res: Option<Res>, res: Res| { if let Some(initial_res) = initial_res { if res != initial_res { // Make sure compilation does not succeed if preferred macro resolution // has changed after the macro had been expanded. In theory all such // situations should be reported as errors, so this is a bug. this.session.delay_span_bug(span, "inconsistent resolution for a macro"); } } else { // It's possible that the macro was unresolved (indeterminate) and silently // expanded into a dummy fragment for recovery during expansion. // Now, post-expansion, the resolution may succeed, but we can't change the // past and need to report an error. // However, non-speculative `resolve_path` can successfully return private items // even if speculative `resolve_path` returned nothing previously, so we skip this // less informative error if the privacy error is reported elsewhere. 
if this.privacy_errors.is_empty() { let msg = format!( "cannot determine resolution for the {} `{}`", kind.descr(), Segment::names_to_string(path) ); let msg_note = "import resolution is stuck, try simplifying macro imports"; this.session.struct_span_err(span, &msg).note(msg_note).emit(); } } }; let macro_resolutions = mem::take(&mut self.multi_segment_macro_resolutions); for (mut path, path_span, kind, parent_scope, initial_res) in macro_resolutions { // FIXME: Path resolution will ICE if segment IDs present. for seg in &mut path { seg.id = None; } match self.resolve_path( &path, Some(MacroNS), &parent_scope, true, path_span, CrateLint::No, ) { PathResult::NonModule(path_res) if path_res.unresolved_segments() == 0 => { let res = path_res.base_res(); check_consistency(self, &path, path_span, kind, initial_res, res); } path_res @ PathResult::NonModule(..) | path_res @ PathResult::Failed { .. } => { let (span, label) = if let PathResult::Failed { span, label, .. } = path_res { (span, label) } else { ( path_span, format!( "partially resolved path in {} {}", kind.article(), kind.descr() ), ) }; self.report_error( span, ResolutionError::FailedToResolve { label, suggestion: None }, ); } PathResult::Module(..) | PathResult::Indeterminate => unreachable!(), } } let macro_resolutions = mem::take(&mut self.single_segment_macro_resolutions); for (ident, kind, parent_scope, initial_binding) in macro_resolutions { match self.early_resolve_ident_in_lexical_scope( ident, ScopeSet::Macro(kind), &parent_scope, true, true, ident.span, ) { Ok(binding) => { let initial_res = initial_binding.map(|initial_binding| { self.record_use(ident, MacroNS, initial_binding, false); initial_binding.res() }); let res = binding.res(); let seg = Segment::from_ident(ident); check_consistency(self, &[seg], ident.span, kind, initial_res, res); } Err(..) 
=> { let expected = kind.descr_expected(); let msg = format!("cannot find {} `{}` in this scope", expected, ident); let mut err = self.session.struct_span_err(ident.span, &msg); self.unresolved_macro_suggestions(&mut err, kind, &parent_scope, ident); err.emit(); } } } let builtin_attrs = mem::take(&mut self.builtin_attrs); for (ident, parent_scope) in builtin_attrs { let _ = self.early_resolve_ident_in_lexical_scope( ident, ScopeSet::Macro(MacroKind::Attr), &parent_scope, true, true, ident.span, ); } } fn check_stability_and_deprecation( &mut self, ext: &SyntaxExtension, path: &ast::Path, node_id: NodeId, ) { let span = path.span; if let Some(stability) = &ext.stability { if let StabilityLevel::Unstable { reason, issue, is_soft } = stability.level { let feature = stability.feature; if !self.active_features.contains(&feature) && !span.allows_unstable(feature) { let lint_buffer = &mut self.lint_buffer; let soft_handler = |lint, span, msg: &_| lint_buffer.buffer_lint(lint, node_id, span, msg); stability::report_unstable( self.session, feature, reason, issue, is_soft, span, soft_handler, ); } } } if let Some(depr) = &ext.deprecation { let path = pprust::path_to_string(&path); let (message, lint) = stability::deprecation_message(depr, "macro", &path); stability::early_report_deprecation( &mut self.lint_buffer, &message, depr.suggestion, lint, span, node_id, ); } } fn prohibit_imported_non_macro_attrs( &self, binding: Option<&'a NameBinding<'a>>, res: Option<Res>, span: Span, ) { if let Some(Res::NonMacroAttr(kind)) = res { if kind != NonMacroAttrKind::Tool && binding.map_or(true, |b| b.is_import()) { let msg = format!("cannot use {} {} through an import", kind.article(), kind.descr()); let mut err = self.session.struct_span_err(span, &msg); if let Some(binding) = binding { err.span_note(binding.span, &format!("the {} imported here", kind.descr())); } err.emit(); } } } crate fn check_reserved_macro_name(&mut self, ident: Ident, res: Res) { // Reserve some names that are not quite covered by the general check // performed on `Resolver::builtin_attrs`. if ident.name == sym::cfg || ident.name == sym::cfg_attr || ident.name == sym::derive { let macro_kind = self.get_macro(res).map(|ext| ext.macro_kind()); if macro_kind.is_some() && sub_namespace_match(macro_kind, Some(MacroKind::Attr)) { self.session.span_err( ident.span, &format!("name `{}` is reserved in attribute namespace", ident), ); } } } /// Compile the macro into a `SyntaxExtension` and possibly replace /// its expander to a pre-defined one for built-in macros. crate fn compile_macro(&mut self, item: &ast::Item, edition: Edition) -> SyntaxExtension { let mut result = compile_declarative_macro( &self.session, self.session.features_untracked(), item, edition, ); if result.is_builtin { // The macro was marked with `#[rustc_builtin_macro]`. if let Some(builtin_macro) = self.builtin_macros.get_mut(&item.ident.name) { // The macro is a built-in, replace its expander function // while still taking everything else from the source code. // If we already loaded this builtin macro, give a better error message than 'no such builtin macro'. 
match mem::replace(builtin_macro, BuiltinMacroState::AlreadySeen(item.span)) { BuiltinMacroState::NotYetSeen(ext) => result.kind = ext.kind, BuiltinMacroState::AlreadySeen(span) => { struct_span_err!( self.session, item.span, E0773, "attempted to define built-in macro more than once" ) .span_note(span, "previously defined here") .emit(); } } } else { let msg = format!("cannot find a built-in macro with name `{}`", item.ident); self.session.span_err(item.span, &msg); } } result } }
44.829464
117
0.493218
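The sub-namespace rule at the top of that file is small enough to restate outside the compiler. A self-contained sketch of the same matching logic with the types inlined (names mirror the source, but nothing here depends on rustc):

#[derive(Clone, Copy)]
enum MacroKind { Bang, Attr, Derive }

#[derive(PartialEq)]
enum SubNS { Bang, AttrLike }

fn sub_ns(kind: MacroKind) -> SubNS {
    match kind {
        MacroKind::Bang => SubNS::Bang,
        MacroKind::Attr | MacroKind::Derive => SubNS::AttrLike,
    }
}

// "No specific sub-namespace" (None) matches anything, on either side.
fn sub_namespace_match(candidate: Option<MacroKind>, requirement: Option<MacroKind>) -> bool {
    candidate.is_none()
        || requirement.is_none()
        || candidate.map(sub_ns) == requirement.map(sub_ns)
}

fn main() {
    assert!(sub_namespace_match(Some(MacroKind::Attr), Some(MacroKind::Derive))); // same sub-namespace
    assert!(!sub_namespace_match(Some(MacroKind::Bang), Some(MacroKind::Attr))); // different ones
    assert!(sub_namespace_match(None, Some(MacroKind::Bang))); // unspecified side matches
}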
3921d306b49d84f159592eeeb6cf23de593652ce
12,114
//! Universal Asynchronous Receiver/Transmitter. //! //! For STM32F4 series of high-performance MCUs with DSP and FPU instructions. use drone_core::periph; use drone_cortexm::reg::marker::*; periph! { /// Generic UART peripheral variant. pub trait UartMap {} /// Generic UART peripheral. pub struct UartPeriph; RCC { BUSENR { 0x20 RwRegBitBand Shared; UARTEN { RwRwRegFieldBitBand } } BUSRSTR { 0x20 RwRegBitBand Shared; UARTRST { RwRwRegFieldBitBand } } BUSSMENR { 0x20 RwRegBitBand Shared; UARTSMEN { RwRwRegFieldBitBand } } } UART { SR { 0x20 RwRegBitBand; CTS { RwRwRegFieldBit Option } LBD { RwRwRegFieldBit } TXE { RoRwRegFieldBit } TC { RwRwRegFieldBit } RXNE { RwRwRegFieldBit } IDLE { RoRwRegFieldBit } ORE { RoRwRegFieldBit } NF { RoRwRegFieldBit } FE { RoRwRegFieldBit } PE { RoRwRegFieldBit } } DR { 0x20 RwRegBitBand; DR { RwRwRegFieldBits } } BRR { 0x20 RwRegBitBand; DIV_Mantissa { RwRwRegFieldBits } DIV_Fraction { RwRwRegFieldBits } } CR1 { 0x20 RwRegBitBand; OVER8 { RwRwRegFieldBit } UE { RwRwRegFieldBit } M { RwRwRegFieldBit } WAKE { RwRwRegFieldBit } PCE { RwRwRegFieldBit } PS { RwRwRegFieldBit } PEIE { RwRwRegFieldBit } TXEIE { RwRwRegFieldBit } TCIE { RwRwRegFieldBit } RXNEIE { RwRwRegFieldBit } IDLEIE { RwRwRegFieldBit } TE { RwRwRegFieldBit } RE { RwRwRegFieldBit } RWU { RwRwRegFieldBit } SBK { RwRwRegFieldBit } } CR2 { 0x20 RwRegBitBand; LINEN { RwRwRegFieldBit } STOP { RwRwRegFieldBits } CLKEN { RwRwRegFieldBit Option } CPOL { RwRwRegFieldBit Option } CPHA { RwRwRegFieldBit Option } LBCL { RwRwRegFieldBit Option } LBDIE { RwRwRegFieldBit } LBDL { RwRwRegFieldBit } ADD { RwRwRegFieldBits } } CR3 { 0x20 RwRegBitBand; ONEBIT { RwRwRegFieldBit } CTSIE { RwRwRegFieldBit Option } CTSE { RwRwRegFieldBit Option } RTSE { RwRwRegFieldBit Option } DMAT { RwRwRegFieldBit } DMAR { RwRwRegFieldBit } SCEN { RwRwRegFieldBit Option } NACK { RwRwRegFieldBit Option } HDSEL { RwRwRegFieldBit } IRLP { RwRwRegFieldBit } IREN { RwRwRegFieldBit } EIE { RwRwRegFieldBit } } GTPR { 0x20 RwRegBitBand Option; GT { RwRwRegFieldBits } PSC { RwRwRegFieldBits } } } } macro_rules! map_uart { ( $uart_macro_doc:expr, $uart_macro:ident, $uart_ty_doc:expr, $uart_ty:ident, $busenr:ident, $busrstr:ident, $bussmenr:ident, $uarten:ident, $uartrst:ident, $uartsmen:ident, $uart:ident, ($($cts:ident)?), ($($clken:ident)?), ($($cpol:ident)?), ($($cpha:ident)?), ($($lbcl:ident)?), ($($ctsie:ident)?), ($($ctse:ident)?), ($($rtse:ident)?), ($($scen:ident)?), ($($nack:ident)?), ($($gtpr:ident)?), ) => { periph::map! 
{ #[doc = $uart_macro_doc] pub macro $uart_macro; #[doc = $uart_ty_doc] pub struct $uart_ty; impl UartMap for $uart_ty {} drone_stm32_map_pieces::reg; crate; RCC { BUSENR { $busenr Shared; UARTEN { $uarten } } BUSRSTR { $busrstr Shared; UARTRST { $uartrst } } BUSSMENR { $bussmenr Shared; UARTSMEN { $uartsmen } } } UART { $uart; SR { SR; CTS { $($cts Option)* } LBD { LBD } TXE { TXE } TC { TC } RXNE { RXNE } IDLE { IDLE } ORE { ORE } NF { NF } FE { FE } PE { PE } } DR { DR; DR { DR } } BRR { BRR; DIV_Mantissa { DIV_Mantissa } DIV_Fraction { DIV_Fraction } } CR1 { CR1; OVER8 { OVER8 } UE { UE } M { M } WAKE { WAKE } PCE { PCE } PS { PS } PEIE { PEIE } TXEIE { TXEIE } TCIE { TCIE } RXNEIE { RXNEIE } IDLEIE { IDLEIE } TE { TE } RE { RE } RWU { RWU } SBK { SBK } } CR2 { CR2; LINEN { LINEN } STOP { STOP } CLKEN { $($clken Option)* } CPOL { $($cpol Option)* } CPHA { $($cpha Option)* } LBCL { $($lbcl Option)* } LBDIE { LBDIE } LBDL { LBDL } ADD { ADD } } CR3 { CR3; ONEBIT { ONEBIT } CTSIE { $($ctsie Option)* } CTSE { $($ctse Option)* } RTSE { $($rtse Option)* } DMAT { DMAT } DMAR { DMAR } SCEN { $($scen Option)* } NACK { $($nack Option)* } HDSEL { HDSEL } IRLP { IRLP } IREN { IREN } EIE { EIE } } GTPR { $( $gtpr Option; GT { GT } PSC { PSC } )* } } } } } #[cfg(any( stm32_mcu = "stm32f401", stm32_mcu = "stm32f405", stm32_mcu = "stm32f407", stm32_mcu = "stm32f410", stm32_mcu = "stm32f411", stm32_mcu = "stm32f412", stm32_mcu = "stm32f413", stm32_mcu = "stm32f427", stm32_mcu = "stm32f429", stm32_mcu = "stm32f446", stm32_mcu = "stm32f469", ))] map_uart! { "Extracts USART1 register tokens.", periph_usart1, "USART1 peripheral variant.", Usart1, APB2ENR, APB2RSTR, APB2LPENR, USART1EN, USART1RST, USART1LPEN, USART1, (CTS), (CLKEN), (CPOL), (CPHA), (LBCL), (CTSIE), (CTSE), (RTSE), (SCEN), (NACK), (GTPR), } #[cfg(any( stm32_mcu = "stm32f401", stm32_mcu = "stm32f405", stm32_mcu = "stm32f407", stm32_mcu = "stm32f410", stm32_mcu = "stm32f411", stm32_mcu = "stm32f412", stm32_mcu = "stm32f413", stm32_mcu = "stm32f427", stm32_mcu = "stm32f429", stm32_mcu = "stm32f446", stm32_mcu = "stm32f469", ))] map_uart! { "Extracts USART2 register tokens.", periph_usart2, "USART2 peripheral variant.", Usart2, APB1ENR, APB1RSTR, APB1LPENR, USART2EN, UART2RST, USART2LPEN, USART2, (CTS), (CLKEN), (CPOL), (CPHA), (LBCL), (CTSIE), (CTSE), (RTSE), (SCEN), (NACK), (GTPR), } #[cfg(any( stm32_mcu = "stm32f405", stm32_mcu = "stm32f407", stm32_mcu = "stm32f412", stm32_mcu = "stm32f413", stm32_mcu = "stm32f427", stm32_mcu = "stm32f429", stm32_mcu = "stm32f446", stm32_mcu = "stm32f469", ))] map_uart! { "Extracts USART3 register tokens.", periph_usart3, "USART3 peripheral variant.", Usart3, APB1ENR, APB1RSTR, APB1LPENR, USART3EN, USART3RST, USART3LPEN, USART3, (CTS), (CLKEN), (CPOL), (CPHA), (LBCL), (CTSIE), (CTSE), (RTSE), (SCEN), (NACK), (GTPR), } #[cfg(any( stm32_mcu = "stm32f405", stm32_mcu = "stm32f407", stm32_mcu = "stm32f413", stm32_mcu = "stm32f427", stm32_mcu = "stm32f429", stm32_mcu = "stm32f446", stm32_mcu = "stm32f469", ))] map_uart! { "Extracts UART4 register tokens.", periph_uart4, "UART4 peripheral variant.", Uart4, APB1ENR, APB1RSTR, APB1LPENR, UART4EN, UART4RST, UART4LPEN, UART4, (), (), (), (), (), (), (), (), (), (), (), } #[cfg(any( stm32_mcu = "stm32f405", stm32_mcu = "stm32f407", stm32_mcu = "stm32f413", stm32_mcu = "stm32f427", stm32_mcu = "stm32f429", stm32_mcu = "stm32f446", stm32_mcu = "stm32f469", ))] map_uart! 
{ "Extracts UART5 register tokens.", periph_uart5, "UART5 peripheral variant.", Uart5, APB1ENR, APB1RSTR, APB1LPENR, UART5EN, UART5RST, UART5LPEN, UART5, (), (), (), (), (), (), (), (), (), (), (), } #[cfg(any( stm32_mcu = "stm32f401", stm32_mcu = "stm32f405", stm32_mcu = "stm32f407", stm32_mcu = "stm32f410", stm32_mcu = "stm32f411", stm32_mcu = "stm32f412", stm32_mcu = "stm32f413", stm32_mcu = "stm32f427", stm32_mcu = "stm32f429", stm32_mcu = "stm32f446", stm32_mcu = "stm32f469", ))] map_uart! { "Extracts USART6 register tokens.", periph_usart6, "USART6 peripheral variant.", Usart6, APB2ENR, APB2RSTR, APB2LPENR, USART6EN, USART6RST, USART6LPEN, USART6, (CTS), (CLKEN), (CPOL), (CPHA), (LBCL), (CTSIE), (CTSE), (RTSE), (SCEN), (NACK), (GTPR), } #[cfg(any( stm32_mcu = "stm32f405", stm32_mcu = "stm32f407", stm32_mcu = "stm32f413", stm32_mcu = "stm32f427", stm32_mcu = "stm32f429", stm32_mcu = "stm32f469", ))] map_uart! { "Extracts UART7 register tokens.", periph_uart7, "UART7 peripheral variant.", Uart7, APB1ENR, APB1RSTR, APB1LPENR, UART7EN, UART7RST, UART7LPEN, UART7, (), (), (), (), (), (), (), (), (), (), (), } #[cfg(any( stm32_mcu = "stm32f405", stm32_mcu = "stm32f407", stm32_mcu = "stm32f413", stm32_mcu = "stm32f427", stm32_mcu = "stm32f429", stm32_mcu = "stm32f469", ))] map_uart! { "Extracts UART8 register tokens.", periph_uart8, "UART8 peripheral variant.", Uart8, APB1ENR, APB1RSTR, APB1LPENR, UART8EN, UART8RST, UART8LPEN, UART8, (), (), (), (), (), (), (), (), (), (), (), } #[cfg(any(stm32_mcu = "stm32f413",))] map_uart! { "Extracts UART9 register tokens.", periph_uart9, "UART9 peripheral variant.", Uart9, APB2ENR, APB2RSTR, APB2LPENR, UART9EN, UART9RST, UART9LPEN, UART9, (), (), (), (), (), (), (), (), (), (), (), } #[cfg(any(stm32_mcu = "stm32f413",))] map_uart! { "Extracts UART10 register tokens.", periph_uart10, "UART10 peripheral variant.", Uart10, APB2ENR, APB2RSTR, APB2LPENR, UART10EN, UART10RST, UART10LPEN, UART10, (), (), (), (), (), (), (), (), (), (), (), }
21.141361
78
0.443454
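A hypothetical usage fragment for the mappings above (field and method names are assumed from Drone's peripheral-token conventions, not taken from this crate's docs): inside a Drone root task, the generated macro extracts the token set, through which the shared RCC enable bit mapped under BUSENR can be set.

// `reg` is the register token index handed to the root task.
let uart = periph_usart1!(reg);
// Enable the USART1 clock via the shared bus-enable bit (APB2ENR.USART1EN here).
uart.rcc_busenr_uarten.set_bit();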
d5eb23b766a01ee4171c77f6a40273ec102b661f
887
use crate::prelude::*; use nu_errors::ShellError; use nu_protocol::{CommandAction, ReturnSuccess, Signature}; use nu_engine::WholeStreamCommand; pub struct Previous; impl WholeStreamCommand for Previous { fn name(&self) -> &str { "p" } fn signature(&self) -> Signature { Signature::build("p") } fn usage(&self) -> &str { "Go to previous shell." } fn run(&self, args: CommandArgs) -> Result<OutputStream, ShellError> { Ok(previous(args)) } } fn previous(_args: CommandArgs) -> OutputStream { vec![Ok(ReturnSuccess::Action(CommandAction::PreviousShell))].into() } #[cfg(test)] mod tests { use super::Previous; use super::ShellError; #[test] fn examples_work_as_expected() -> Result<(), ShellError> { use crate::examples::test as test_examples; test_examples(Previous {}) } }
20.627907
74
0.631342
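The shell-switching commands come in mirrored pairs; a sketch of the matching forward command, assuming a `CommandAction::NextShell` variant exists alongside `PreviousShell` in `nu_protocol`:

// Everything except the action is symmetrical with `previous` above.
fn next(_args: CommandArgs) -> OutputStream {
    vec![Ok(ReturnSuccess::Action(CommandAction::NextShell))].into()
}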
87bc7615a19e768fc4f66ed38b5ec8a6f06dfdef
649
use std::env::current_dir; use std::fs::create_dir_all; use cosmwasm_schema::{export_schema, remove_schemas, schema_for}; use cs_map::msg::{CountResponse, HandleMsg, InitMsg, QueryMsg}; use cs_map::state::State; fn main() { let mut out_dir = current_dir().unwrap(); out_dir.push("schema"); create_dir_all(&out_dir).unwrap(); remove_schemas(&out_dir).unwrap(); export_schema(&schema_for!(InitMsg), &out_dir); export_schema(&schema_for!(HandleMsg), &out_dir); export_schema(&schema_for!(QueryMsg), &out_dir); export_schema(&schema_for!(State), &out_dir); export_schema(&schema_for!(CountResponse), &out_dir); }
30.904762
65
0.714946
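`schema_for!` builds a schemars `RootSchema`, which `export_schema` then writes to `schema/<type>.json`. A minimal sketch with a made-up message type, printing the schema instead of writing it (assumes `schemars`, `serde`, and `serde_json` as dependencies):

use cosmwasm_schema::schema_for;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

// Hypothetical message type, standing in for InitMsg/HandleMsg above.
#[derive(Serialize, Deserialize, JsonSchema)]
struct PingMsg {
    payload: String,
}

fn main() {
    let schema = schema_for!(PingMsg);
    println!("{}", serde_json::to_string_pretty(&schema).unwrap());
}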
7a44a376a026ce00927f3dac6c426c79e4787da8
1,433
use lzma_rs::decompress::{Options, Stream}; use std::io::{Cursor, Write}; /// LZMA_STATUS is a type that represents either success or failure. #[repr(C)] pub enum LZMA_STATUS { /// Status is successful LZMA_STATUS_OK, /// An error occurred LZMA_STATUS_ERROR, } #[no_mangle] /// Size of encoded properties in header. pub static LZMA_PROPS_SIZE: usize = 5; /// Use the lzma algorithm to decompress a chunk of data. /// /// Returns LZMA_STATUS_OK on success, LZMA_STATUS_ERROR otherwise. #[no_mangle] pub extern "C" fn lzma_decompress( input: *const u8, input_len: &mut usize, output: *mut u8, output_len: &mut usize, allow_incomplete: bool, memlimit: usize, ) -> LZMA_STATUS { let input = unsafe { std::slice::from_raw_parts(input, *input_len) }; let output = unsafe { std::slice::from_raw_parts_mut(output, *output_len) }; let output = Cursor::new(output); let options = Options { memlimit: Some(memlimit), allow_incomplete, ..Default::default() }; let mut stream = Stream::new_with_options(&options, output); if stream.write_all(input).is_err() { if !allow_incomplete { return LZMA_STATUS::LZMA_STATUS_ERROR; } } if let Ok(output) = stream.finish() { *output_len = output.position() as usize; LZMA_STATUS::LZMA_STATUS_OK } else { LZMA_STATUS::LZMA_STATUS_ERROR } }
26.537037
80
0.646895
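Because `lzma_decompress` is `extern "C"` with in/out length parameters, it can be exercised from Rust exactly the way a C caller would use it. A sketch follows; `call_decompress` is a hypothetical helper, and the input bytes in a real call must be an actual LZMA stream or the function reports an error.

fn call_decompress(input: &[u8], out: &mut [u8]) -> Option<usize> {
    let mut in_len = input.len();
    // On entry: capacity of `out`. On success: bytes actually written.
    let mut out_len = out.len();
    let status = lzma_decompress(
        input.as_ptr(),
        &mut in_len,
        out.as_mut_ptr(),
        &mut out_len,
        false,            // allow_incomplete
        64 * 1024 * 1024, // memlimit
    );
    match status {
        LZMA_STATUS::LZMA_STATUS_OK => Some(out_len),
        LZMA_STATUS::LZMA_STATUS_ERROR => None,
    }
}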
21b07587edf9b4bdb05e04387a726d4e97508736
6,021
//! Binary machine code emission.
//!
//! The `binemit` module contains code for translating Cranelift's intermediate representation into
//! binary machine code.

mod memorysink;
mod relaxation;
mod shrink;

pub use self::memorysink::{MemoryCodeSink, NullTrapSink, RelocSink, TrapSink};
pub use self::relaxation::relax_branches;
pub use self::shrink::shrink_instructions;
pub use crate::regalloc::RegDiversions;

use crate::ir::{ExternalName, Function, Inst, JumpTable, SourceLoc, TrapCode};
use core::fmt;

/// Offset in bytes from the beginning of the function.
///
/// Cranelift can be used as a cross compiler, so we don't want to use a type like `usize` which
/// depends on the *host* platform, not the *target* platform.
pub type CodeOffset = u32;

/// Addend to add to the symbol value.
pub type Addend = i64;

/// Relocation kinds for every ISA
#[derive(Copy, Clone, Debug)]
pub enum Reloc {
    /// absolute 4-byte
    Abs4,
    /// absolute 8-byte
    Abs8,
    /// x86 PC-relative 4-byte
    X86PCRel4,
    /// x86 PC-relative 4-byte offset to trailing rodata
    X86PCRelRodata4,
    /// x86 call to PC-relative 4-byte
    X86CallPCRel4,
    /// x86 call to PLT-relative 4-byte
    X86CallPLTRel4,
    /// x86 GOT PC-relative 4-byte
    X86GOTPCRel4,
    /// Arm32 call target
    Arm32Call,
    /// Arm64 call target
    Arm64Call,
    /// RISC-V call target
    RiscvCall,
}

impl fmt::Display for Reloc {
    /// Display trait implementation drops the arch, since it's used in contexts where the arch is
    /// already unambiguous, e.g. clif syntax with isa specified. In other contexts, use Debug.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Reloc::Abs4 => write!(f, "Abs4"),
            Reloc::Abs8 => write!(f, "Abs8"),
            Reloc::X86PCRel4 => write!(f, "PCRel4"),
            Reloc::X86PCRelRodata4 => write!(f, "PCRelRodata4"),
            Reloc::X86CallPCRel4 => write!(f, "CallPCRel4"),
            Reloc::X86CallPLTRel4 => write!(f, "CallPLTRel4"),
            Reloc::X86GOTPCRel4 => write!(f, "GOTPCRel4"),
            Reloc::Arm32Call | Reloc::Arm64Call | Reloc::RiscvCall => write!(f, "Call"),
        }
    }
}

/// Container for information about a vector of compiled code and its supporting read-only data.
///
/// The code starts at offset 0 and is followed optionally by relocatable jump tables and copyable
/// (raw binary) read-only data. Any padding between sections is always part of the section that
/// precedes the boundary between the sections.
#[derive(PartialEq)]
pub struct CodeInfo {
    /// Number of bytes of machine code (the code starts at offset 0).
    pub code_size: CodeOffset,
    /// Number of bytes of jumptables.
    pub jumptables_size: CodeOffset,
    /// Number of bytes of rodata.
    pub rodata_size: CodeOffset,
    /// Number of bytes in total.
    pub total_size: CodeOffset,
}

impl CodeInfo {
    /// Offset of any relocatable jump tables, or equal to rodata if there are no jump tables.
    pub fn jumptables(&self) -> CodeOffset {
        self.code_size
    }

    /// Offset of any copyable read-only data, or equal to total_size if there are no rodata.
    pub fn rodata(&self) -> CodeOffset {
        self.code_size + self.jumptables_size
    }
}

/// Abstract interface for adding bytes to the code segment.
///
/// A `CodeSink` will receive all of the machine code for a function. It also accepts relocations
/// which are locations in the code section that need to be fixed up when linking.
pub trait CodeSink {
    /// Get the current position.
    fn offset(&self) -> CodeOffset;

    /// Add 1 byte to the code section.
    fn put1(&mut self, _: u8);

    /// Add 2 bytes to the code section.
    fn put2(&mut self, _: u16);

    /// Add 4 bytes to the code section.
    fn put4(&mut self, _: u32);

    /// Add 8 bytes to the code section.
fn put8(&mut self, _: u64); /// Add a relocation referencing an EBB at the current offset. fn reloc_ebb(&mut self, _: Reloc, _: CodeOffset); /// Add a relocation referencing an external symbol plus the addend at the current offset. fn reloc_external(&mut self, _: Reloc, _: &ExternalName, _: Addend); /// Add a relocation referencing a jump table. fn reloc_jt(&mut self, _: Reloc, _: JumpTable); /// Add trap information for the current offset. fn trap(&mut self, _: TrapCode, _: SourceLoc); /// Machine code output is complete, jump table data may follow. fn begin_jumptables(&mut self); /// Jump table output is complete, raw read-only data may follow. fn begin_rodata(&mut self); /// Read-only data output is complete, we're done. fn end_codegen(&mut self); } /// Report a bad encoding error. #[cold] pub fn bad_encoding(func: &Function, inst: Inst) -> ! { panic!( "Bad encoding {} for {}", func.encodings[inst], func.dfg.display_inst(inst, None) ); } /// Emit a function to `sink`, given an instruction emitter function. /// /// This function is called from the `TargetIsa::emit_function()` implementations with the /// appropriate instruction emitter. pub fn emit_function<CS, EI>(func: &Function, emit_inst: EI, sink: &mut CS) where CS: CodeSink, EI: Fn(&Function, Inst, &mut RegDiversions, &mut CS), { let mut divert = RegDiversions::new(); for ebb in func.layout.ebbs() { divert.clear(); debug_assert_eq!(func.offsets[ebb], sink.offset()); for inst in func.layout.ebb_insts(ebb) { emit_inst(func, inst, &mut divert, sink); } } sink.begin_jumptables(); // output jump tables for (jt, jt_data) in func.jump_tables.iter() { let jt_offset = func.jt_offsets[jt]; for ebb in jt_data.iter() { let rel_offset: i32 = func.offsets[*ebb] as i32 - jt_offset as i32; sink.put4(rel_offset as u32) } } sink.begin_rodata(); // TODO: No read-only data (constant pools) at this time. sink.end_codegen(); }
32.197861
99
0.655373
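Every method of `CodeSink` is listed above, so a minimal implementor is easy to sketch: the hypothetical `SizeSink` below only tracks the write position, which is enough to dry-run `emit_function` and measure code size without keeping any bytes.

struct SizeSink {
    offset: CodeOffset,
}

impl CodeSink for SizeSink {
    fn offset(&self) -> CodeOffset {
        self.offset
    }
    fn put1(&mut self, _: u8) {
        self.offset += 1;
    }
    fn put2(&mut self, _: u16) {
        self.offset += 2;
    }
    fn put4(&mut self, _: u32) {
        self.offset += 4;
    }
    fn put8(&mut self, _: u64) {
        self.offset += 8;
    }
    // Relocations and traps are irrelevant for pure size measurement.
    fn reloc_ebb(&mut self, _: Reloc, _: CodeOffset) {}
    fn reloc_external(&mut self, _: Reloc, _: &ExternalName, _: Addend) {}
    fn reloc_jt(&mut self, _: Reloc, _: JumpTable) {}
    fn trap(&mut self, _: TrapCode, _: SourceLoc) {}
    fn begin_jumptables(&mut self) {}
    fn begin_rodata(&mut self) {}
    fn end_codegen(&mut self) {}
}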
e8a2aeb111ad5e07c274ce1fa7c70e9745583fe6
13,009
use std::collections::HashMap;

use crate::{ClientId, TxId, Money, TxType, Transaction};
use crate::csv::RawAccount;

/// Error types returned when processing an account's transactions
#[derive(Debug)]
pub enum AccountError {
    // Account is frozen, cannot perform any other operation on it
    Frozen(ClientId),
    InsufficientFundsForWithdrawal(ClientId),
    NoTxForDispute(TxId),
    TxNotInDispute(TxId),
}

/// data structure representing account state
#[derive(Debug, PartialEq)]
pub struct Account {
    pub client_id: ClientId,
    // The total funds that are available for trading, staking, withdrawal, etc. This
    // should be equal to the total - held amounts
    pub available_amount: Money,
    // The total funds that are held for dispute. This should be equal to total - available amounts
    pub held_amount: Money,
    // The total funds that are available or held. This should be equal to available + held
    pub total_amount: Money,
    pub is_locked: bool,
}

/// conversion from Account to RawAccount
impl From<Account> for RawAccount {
    fn from(source: Account) -> Self {
        RawAccount {
            client_id: source.client_id,
            available_amount: source.available_amount,
            held_amount: source.held_amount,
            total_amount: source.total_amount,
            is_locked: source.is_locked,
        }
    }
}

impl Default for Account {
    fn default() -> Self {
        Self {
            client_id: Default::default(),
            available_amount: Default::default(),
            held_amount: Default::default(),
            total_amount: Default::default(),
            is_locked: Default::default(),
        }
    }
}

impl Account {
    /// called by the account transaction processing task to handle the supplied transaction.
    /// The only side effect can be on a transaction in the history, when we need to change the
    /// state of `in_dispute` due to dispute/resolve/chargeback events.
    ///
    /// `t` reference to the transaction that is currently processed
    /// `history` mutable reference to the history of all transactions for the given account
    ///
    /// returns a new Account instance
    ///
    /// todo: improvement could be done in order to make this a pure function.
    /// One idea is to return info that another transaction should be changed
    pub(crate) fn process_transaction(
        &self,
        t: &Transaction,
        history: &mut HashMap<TxId, Transaction>,
    ) -> core::result::Result<Self, AccountError> {
        use TxType::*;
        match t.tx_type {
            Deposit => self.deposit(t.amount),
            Withdrawal => self.withdrawal(t.amount),
            Dispute => self.dispute(t.tx_id, history),
            Resolve => self.resolve(t.tx_id, history),
            Chargeback => self.chargeback(t.tx_id, history),
        }
    }

    /// A deposit is a credit to the client's asset account, meaning it should increase the available and
    /// total funds of the client account
    fn deposit(&self, amount: Money) -> core::result::Result<Self, AccountError> {
        if self.is_locked {
            Err(AccountError::Frozen(self.client_id))
        } else {
            let mut a = Account::default();
            a.client_id = self.client_id;
            a.available_amount = self.available_amount + amount;
            a.held_amount = self.held_amount;
            a.total_amount = a.available_amount + a.held_amount;
            Ok(a)
        }
    }

    /// A withdrawal is a debit to the client's asset account, meaning it should decrease the available and
    /// total funds of the client account.
    /// If a client does not have sufficient available funds the withdrawal should fail and the total amount
    /// of funds should not change
    fn withdrawal(&self, amount: Money) -> core::result::Result<Self, AccountError> {
        if self.is_locked {
            Err(AccountError::Frozen(self.client_id))
        } else if self.available_amount < amount {
            Err(AccountError::InsufficientFundsForWithdrawal(self.client_id))
        } else {
            let mut a = Account::default();
            a.client_id = self.client_id;
            a.available_amount = self.available_amount - amount;
            a.held_amount = self.held_amount;
            a.total_amount = a.available_amount + a.held_amount;
            Ok(a)
        }
    }

    /// A dispute represents a client's claim that a transaction was erroneous and should be reversed.
    /// The transaction shouldn't be reversed yet but the associated funds should be held. This means
    /// that the client's available funds should decrease by the amount disputed, their held funds should
    /// increase by the amount disputed, while their total funds should remain the same.
    /// Notice that a dispute does not state the amount disputed. Instead a dispute references the
    /// transaction that is disputed by ID. If the tx specified by the dispute doesn't exist you can ignore it
    /// and assume this is an error on our partner's side.
    fn dispute(
        &self,
        tx_id: TxId,
        history: &mut HashMap<TxId, Transaction>,
    ) -> core::result::Result<Self, AccountError> {
        if self.is_locked {
            return Err(AccountError::Frozen(self.client_id));
        }
        let t = history.get_mut(&tx_id);
        match t {
            Some(tx) => {
                tx.in_dispute = true;
                let mut a = Account::default();
                a.client_id = self.client_id;
                a.available_amount = self.available_amount - tx.amount;
                a.held_amount = self.held_amount + tx.amount;
                a.total_amount = a.available_amount + a.held_amount;
                Ok(a)
            }
            None => Err(AccountError::NoTxForDispute(tx_id)),
        }
    }

    /// A resolve represents a resolution to a dispute, releasing the associated held funds. Funds that
    /// were previously disputed are no longer disputed. This means that the client's held funds should
    /// decrease by the amount no longer disputed, their available funds should increase by the
    /// amount no longer disputed, and their total funds should remain the same.
    /// Like disputes, resolves do not specify an amount. Instead they refer to a transaction that was
    /// under dispute by ID. If the tx specified doesn't exist, or the tx isn't under dispute, you can ignore
    /// the resolve and assume this is an error on our partner's side.
fn resolve( &self, tx_id: TxId, history: &mut HashMap<TxId, Transaction>, ) -> core::result::Result<Self, AccountError> { if self.is_locked { return Err(AccountError::Frozen(self.client_id)); } let t = history.get_mut(&tx_id); match t { Some(tx) => { if tx.in_dispute { tx.in_dispute = false; let mut a = Account::default(); a.client_id = self.client_id; a.available_amount = self.available_amount + tx.amount; a.held_amount = self.held_amount - tx.amount; a.total_amount = a.available_amount + a.held_amount; Ok(a) } else { Err(AccountError::TxNotInDispute(tx_id)) } } None => Err(AccountError::NoTxForDispute(tx_id)), } } /// A chargeback is the final state of a dispute and represents the client reversing a transaction. /// Funds that were held have now been withdrawn. This means that the clients held funds and /// total funds should decrease by the amount previously disputed. If a chargeback occurs the /// client's account should be immediately frozen. /// Like a dispute and a resolve a chargeback refers to the transaction by ID (tx) and does not /// specify an amount. Like a resolve, if the tx specified doesn't exist, or the tx isn't under dispute, /// you can ignore chargeback and assume this is an error on our partner's side. fn chargeback( &self, tx_id: TxId, history: &mut HashMap<TxId, Transaction>, ) -> core::result::Result<Self, AccountError> { if self.is_locked { return Err(AccountError::Frozen(self.client_id)); } let t = history.get_mut(&tx_id); match t { Some(tx) => { if tx.in_dispute { tx.in_dispute = false; let mut a = Account::default(); a.client_id = self.client_id; a.available_amount = self.available_amount; a.held_amount = self.held_amount - tx.amount; a.total_amount = a.available_amount + a.held_amount; a.is_locked = true; Ok(a) } else { Err(AccountError::TxNotInDispute(tx_id)) } } None => Err(AccountError::NoTxForDispute(tx_id)), } } } #[cfg(test)] mod tests { use std::collections::HashMap; use crate::{account::Account, TxType, Transaction}; /// tests for default settings #[test] fn account_default() { let a = Account::default(); assert_eq!(a.client_id, 0); assert_eq!(a.available_amount, 0.0); assert_eq!(a.held_amount, 0.0); assert_eq!(a.total_amount, 0.0); assert_eq!(a.is_locked, false); } #[test] fn account_deposit() { let mut a = Account { client_id: 1, total_amount: 0.0, held_amount: 0.0, available_amount: 0.0, is_locked: false, }; let a1 = a.deposit(5.0).unwrap(); a = Account { client_id: 1, total_amount: 5.0, held_amount: 0.0, available_amount: 5.0, is_locked: false, }; assert_eq!(a, a1); } #[test] fn account_withdrawal() { let mut a = Account { client_id: 1, total_amount: 15.0, held_amount: 5.0, available_amount: 10.0, is_locked: false, }; let a1 = a.withdrawal(5.0).unwrap(); a = Account { client_id: 1, total_amount: 10.0, held_amount: 5.0, available_amount: 5.0, is_locked: false, }; assert_eq!(a, a1); } #[test] fn account_dispute() { let mut a = Account { client_id: 1, available_amount: 10.0, held_amount: 5.0, total_amount: 15.0, is_locked: false, }; let mut history = HashMap::<u32, Transaction>::new(); history.insert( 1, Transaction { tx_type: TxType::Deposit, client_id: 1, tx_id: 1, amount: 10.0, in_dispute: false, }, ); let a1 = a.dispute(1, &mut history).unwrap(); a = Account { client_id: 1, available_amount: 0.0, held_amount: 15.0, total_amount: 15.0, is_locked: false, }; assert_eq!(a, a1); } #[test] fn account_resolve() { let mut a = Account { client_id: 1, available_amount: 0.0, held_amount: 15.0, total_amount: 15.0, is_locked: false, }; let mut history = 
HashMap::<u32, Transaction>::new(); history.insert( 1, Transaction { tx_type: TxType::Deposit, client_id: 1, tx_id: 1, amount: 10.0, in_dispute: true, }, ); let a1 = a.resolve(1, &mut history).unwrap(); a = Account { client_id: 1, available_amount: 10.0, held_amount: 5.0, total_amount: 15.0, is_locked: false, }; assert_eq!(a, a1); } #[test] fn account_chargeback() { let mut a = Account { client_id: 1, available_amount: 10.0, held_amount: 15.0, total_amount: 25.0, is_locked: false, }; let mut history = HashMap::<u32, Transaction>::new(); history.insert( 1, Transaction { tx_type: TxType::Deposit, client_id: 1, tx_id: 1, amount: 10.0, in_dispute: true, }, ); let a1 = a.chargeback(1, &mut history).unwrap(); a = Account { client_id: 1, available_amount: 10.0, held_amount: 5.0, total_amount: 15.0, is_locked: true, }; assert_eq!(a, a1); } }
34.876676
115
0.563302
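A sketch of the intended call sequence, usable from inside the crate since `process_transaction` is `pub(crate)`: deposit, dispute, then chargeback, after which the account must come back frozen. `demo_chargeback_flow` is hypothetical; the `Transaction` literals follow the shape used in the tests above, and the dispute/chargeback records carry `amount: 0.0` because both operations read the disputed amount from the history entry, not from the incoming record.

fn demo_chargeback_flow() -> Result<(), AccountError> {
    let mut history: HashMap<TxId, Transaction> = HashMap::new();

    let deposit = Transaction {
        tx_type: TxType::Deposit, client_id: 1, tx_id: 1, amount: 10.0, in_dispute: false,
    };
    let account = Account { client_id: 1, ..Account::default() }
        .process_transaction(&deposit, &mut history)?;
    history.insert(1, deposit);

    // The dispute holds the 10.0; the chargeback then removes it and freezes the account.
    let dispute = Transaction {
        tx_type: TxType::Dispute, client_id: 1, tx_id: 1, amount: 0.0, in_dispute: false,
    };
    let account = account.process_transaction(&dispute, &mut history)?;

    let chargeback = Transaction {
        tx_type: TxType::Chargeback, client_id: 1, tx_id: 1, amount: 0.0, in_dispute: false,
    };
    let account = account.process_transaction(&chargeback, &mut history)?;
    assert!(account.is_locked);
    assert_eq!(account.total_amount, 0.0);
    Ok(())
}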
fe38b15783dc947252533582a3a457b4b8335e5b
10,498
use crate::config::PlatformConfiguration; use crate::platform::regular_platform::RegularPlatform; use crate::toolchain::ToolchainConfig; use crate::{Compiler, Device, Platform, PlatformManager, Result}; use std::{env, fs, path, process, sync}; pub use self::device::AndroidDevice; use anyhow::Context; mod device; pub struct AndroidManager { compiler: sync::Arc<Compiler>, adb: path::PathBuf, } impl PlatformManager for AndroidManager { fn devices(&self) -> Result<Vec<Box<dyn Device>>> { let result = process::Command::new(&self.adb).arg("devices").output()?; let mut devices = vec![]; let device_regex = ::regex::Regex::new(r#"^(\S+)\tdevice\r?$"#)?; for line in String::from_utf8(result.stdout)?.split("\n").skip(1) { if let Some(caps) = device_regex.captures(line) { let d = AndroidDevice::from_id(self.adb.clone(), &caps[1])?; debug!( "Discovered Android device {} ({:?})", d, d.supported_targets ); devices.push(Box::new(d) as Box<dyn Device>); } } Ok(devices) } fn platforms(&self) -> Result<Vec<Box<dyn Platform>>> { if let Some(ndk) = ndk()? { let default_api_level = "21"; debug!("Android NDK: {:?}", ndk); let version = ndk_version(&ndk)?; let major = version .split(".") .next() .ok_or_else(|| anyhow!("Invalid version found for ndk {:?}", &ndk))?; let major: usize = major .parse() .with_context(|| format!("Invalid version found for ndk {:?}", &ndk))?; debug!( "Android ndk: {:?}, ndk version: {}, major: {}", ndk, version, major ); if major >= 19 { let mut platforms = vec![]; let prebuilt = ndk.join("toolchains/llvm/prebuilt"); let tools = prebuilt .read_dir()? .next() .ok_or_else(|| anyhow!("No tools in toolchain"))??; let bin = tools.path().join("bin"); debug!("Android tools bin: {:?}", bin); for (rustc_cpu, cc_cpu, binutils_cpu, abi_kind) in &[ ("aarch64", "aarch64", "aarch64", "android"), ("armv7", "armv7a", "arm", "androideabi"), ("i686", "i686", "i686", "android"), ("x86_64", "x86_64", "x86_64", "android"), ] { let mut api_levels: Vec<String> = Vec::new(); for entry in tools .path() .join(format!( "sysroot/usr/lib/{}-linux-{}", binutils_cpu, abi_kind )) .read_dir()? 
{ let entry = entry?; if entry.file_type()?.is_dir() { let folder_name = entry.file_name().into_string().unwrap(); match folder_name.parse::<u32>() { Ok(_) => api_levels.push(folder_name), Err(_) => {} } } } api_levels.sort(); let create_platform = |api: &str, suffix: &str| { let id = format!("auto-android-{}{}", rustc_cpu, suffix); let tc = ToolchainConfig { bin_dir: bin.clone(), rustc_triple: format!("{}-linux-{}", rustc_cpu, abi_kind), root: prebuilt.clone(), sysroot: Some(tools.path().join("sysroot")), cc: "clang".to_string(), binutils_prefix: format!("{}-linux-{}", binutils_cpu, abi_kind), cc_prefix: format!("{}-linux-{}{}", cc_cpu, abi_kind, api), }; RegularPlatform::new_with_tc( self.compiler.clone(), PlatformConfiguration::default(), id, tc, ) }; for api in api_levels.iter() { platforms.push(create_platform(&api, &format!("-api{}", api))?); } if !api_levels.is_empty() { platforms.push(create_platform( api_levels .first() .expect("The api level vector shouldn't be empty"), "-min", )?); platforms.push(create_platform( api_levels .last() .expect("The api level vector shouldn't be empty"), "-latest", )?); } platforms.push(create_platform(default_api_level, "")?); } return Ok(platforms); } } return Ok(vec![]); } } impl AndroidManager { pub fn probe(compiler: sync::Arc<Compiler>) -> Option<AndroidManager> { match adb() { Ok(adb) => { debug!("ADB found: {:?}", adb); Some(AndroidManager { adb, compiler }) } Err(_) => { debug!("adb not found in path, android disabled"); None } } } } fn probable_sdk_locs() -> Result<Vec<path::PathBuf>> { let mut v = vec![]; for var in &[ "ANDROID_HOME", "ANDROID_SDK", "ANDROID_SDK_ROOT", "ANDROID_SDK_HOME", ] { if let Ok(path) = env::var(var) { let path = path::Path::new(&path); if path.is_dir() { v.push(path.to_path_buf()) } } } if let Ok(home) = env::var("HOME") { let mac = path::Path::new(&home).join("/Library/Android/sdk"); if mac.is_dir() { v.push(mac); } } let casks = path::PathBuf::from("/usr/local/Caskroom/android-sdk"); if casks.is_dir() { for kid in casks.read_dir()? { let kid = kid?; if kid.file_name() != ".metadata" { v.push(kid.path()); } } } debug!("Candidates SDK: {:?}", v); Ok(v) } fn ndk() -> Result<Option<path::PathBuf>> { if let Ok(path) = env::var("ANDROID_NDK_HOME") { return Ok(Some(path.into())); } for sdk in probable_sdk_locs()? { if sdk.join("ndk-bundle/source.properties").is_file() { return Ok(Some(sdk.join("ndk-bundle"))); } if let Some(ndk) = find_non_legacy_ndk(&sdk)? { return Ok(Some(ndk)); } } debug!("Android NDK not found"); Ok(None) } fn ndk_version(ndk: &path::Path) -> Result<String> { let sources_prop_file = ndk.join("source.properties"); let props = fs::read_to_string(&sources_prop_file).with_context(|| { format!( "Android NDK at {:?} does not contains a valid ndk-bundle: opening: {:?}", ndk, sources_prop_file ) })?; let revision_line = props .split("\n") .find(|l| l.starts_with("Pkg.Revision")) .with_context(|| { format!( "{:?} does not contain a Pkg.Revision line. Invalid SDK?", sources_prop_file ) })?; Ok(revision_line.split(" ").last().unwrap().to_string()) } fn adb() -> Result<path::PathBuf> { fn try_out(command: &path::Path) -> bool { match process::Command::new(command) .arg("--version") .stdout(process::Stdio::null()) .stderr(process::Stdio::null()) .status() { Ok(_) => true, Err(_) => false, } } if let Ok(adb) = env::var("DINGHY_ANDROID_ADB") { return Ok(adb.into()); } if let Ok(adb) = ::which::which("adb") { return Ok(adb); } for loc in probable_sdk_locs()? 
{
        let adb = loc.join("platform-tools/adb");
        if try_out(&adb) {
            return Ok(adb.into());
        }
    }
    bail!("adb could not be found")
}

fn find_non_legacy_ndk(sdk: &path::Path) -> Result<Option<path::PathBuf>> {
    let ndk_root = sdk.join("ndk");
    if !ndk_root.is_dir() {
        return Ok(None);
    }
    let ndk = ndk_root
        .read_dir()
        .with_context(|| format!("Cannot open NDK directory at {}", ndk_root.display()))?
        .filter_map(Result::ok)
        .filter_map(|directory| {
            directory
                .path()
                .file_name()
                .and_then(|name| {
                    let name = name.to_string_lossy();
                    // Filter out the directory if we fail to parse its name as semver
                    semver::Version::parse(&name).ok()
                })
                .map(|version| (directory, version))
        })
        .max_by(|left, right| {
            let left_version: &semver::Version = &left.1;
            let right_version: &semver::Version = &right.1;
            left_version.cmp(right_version)
        })
        .map(|tuple| tuple.0.path());
    Ok(ndk)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_find_non_legacy_ndk() {
        let sdk_dir = tempfile::tempdir().unwrap();
        let sdk = sdk_dir.path();
        let ndk_root = sdk.join("ndk");
        let ndk_versions = ["21.1.123456", "21.3.6528147", "20.1.5948944"];
        for version in &ndk_versions {
            let path = ndk_root.join(version);
            fs::create_dir_all(path).unwrap();
        }
        let ndk = find_non_legacy_ndk(sdk).unwrap();
        let expected = ndk_root.join("21.3.6528147");
        assert_eq!(Some(expected), ndk);
    }

    #[test]
    fn test_find_non_legacy_ndk_on_non_existing_directory() {
        let sdk = tempfile::tempdir().unwrap();
        let ndk = find_non_legacy_ndk(sdk.path()).unwrap();
        assert_eq!(None, ndk);
    }
}
34.877076
92
0.456944
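`ndk_version` only needs a `source.properties` file with a `Pkg.Revision` line, so it can be unit-tested the same way `find_non_legacy_ndk` already is. A sketch of such a test follows (hypothetical, but using only `tempfile` and `std::fs`, which the existing tests already rely on):

#[test]
fn test_ndk_version_parses_pkg_revision() {
    let ndk = tempfile::tempdir().unwrap();
    std::fs::write(
        ndk.path().join("source.properties"),
        "Pkg.Desc = Android NDK\nPkg.Revision = 21.3.6528147\n",
    )
    .unwrap();
    // `ndk_version` keeps the last space-separated token of the Pkg.Revision line.
    assert_eq!(ndk_version(ndk.path()).unwrap(), "21.3.6528147");
}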
e4c6c203833d542e4f9d8701f0a8817afd7a39e1
134
mod friends_leaderboard;
mod leaderboard;

pub use friends_leaderboard::FriendsLeaderboardPage;
pub use leaderboard::LeaderboardPage;
22.333333
52
0.858209
e5b2d5eedcbac7ace805e20acc530bf51b4a7181
3,003
use bevy::{prelude::*, scene::InstanceId}; fn main() { App::build() .add_resource(Msaa { samples: 4 }) .add_plugins(DefaultPlugins) .add_resource(SceneInstance::default()) .add_startup_system(setup.system()) .add_system(scene_update.system()) .add_system(move_scene_entities.system()) .run(); } // Resource to hold the scene `instance_id` until it is loaded #[derive(Default)] struct SceneInstance(Option<InstanceId>); // Component that will be used to tag entities in the scene struct EntityInMyScene; fn setup( commands: &mut Commands, asset_server: Res<AssetServer>, mut scene_spawner: ResMut<SceneSpawner>, mut scene_instance: ResMut<SceneInstance>, ) { commands .spawn(LightBundle { transform: Transform::from_xyz(4.0, 5.0, 4.0), ..Default::default() }) .spawn(PerspectiveCameraBundle { transform: Transform::from_xyz(1.05, 0.9, 1.5) .looking_at(Vec3::new(0.0, 0.3, 0.0), Vec3::unit_y()), ..Default::default() }); // Spawn the scene as a child of another entity. This first scene will be translated backward // with its parent commands .spawn(( Transform::from_xyz(0.0, 0.0, -1.0), GlobalTransform::default(), )) .with_children(|parent| { parent.spawn_scene(asset_server.load("models/FlightHelmet/FlightHelmet.gltf#Scene0")); }); // Spawn a second scene, and keep its `instance_id` let instance_id = scene_spawner.spawn(asset_server.load("models/FlightHelmet/FlightHelmet.gltf#Scene0")); scene_instance.0 = Some(instance_id); } // This system will wait for the scene to be ready, and then tag entities from // the scene with `EntityInMyScene`. All entities from the second scene will be // tagged fn scene_update( commands: &mut Commands, scene_spawner: Res<SceneSpawner>, scene_instance: Res<SceneInstance>, mut done: Local<bool>, ) { if !*done { if let Some(instance_id) = scene_instance.0 { if let Some(entity_iter) = scene_spawner.iter_instance_entities(instance_id) { entity_iter.for_each(|entity| { commands.insert_one(entity, EntityInMyScene); }); *done = true; } } } } // This system will move all entities with component `EntityInMyScene`, so all // entities from the second scene fn move_scene_entities( time: Res<Time>, mut scene_entities: Query<&mut Transform, With<EntityInMyScene>>, ) { let mut direction = 1.; let mut scale = 1.; for mut transform in scene_entities.iter_mut() { transform.translation = Vec3::new( scale * direction * time.seconds_since_startup().sin() as f32 / 20., 0., time.seconds_since_startup().cos() as f32 / 20., ); direction *= -1.; scale += 0.5; } }
31.946809
98
0.618715
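Once entities carry the `EntityInMyScene` tag, any other system can address them with the same query filter. A hypothetical variation that spins the second scene in place instead of translating it (registered like the others with `.add_system(spin_scene_entities.system())`):

fn spin_scene_entities(
    time: Res<Time>,
    mut scene_entities: Query<&mut Transform, With<EntityInMyScene>>,
) {
    for mut transform in scene_entities.iter_mut() {
        // One full turn roughly every 2*PI seconds of app time.
        transform.rotation = Quat::from_rotation_y(time.seconds_since_startup() as f32);
    }
}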
0855a7c0d56dc70f4993457d092182b1516c2d40
215
#[allow(dead_code)]
#[rustversion::attr(stable(1.45), test)]
fn derive_props() {
    let t = trybuild::TestCases::new();
    t.pass("tests/derive_props/pass.rs");
    t.compile_fail("tests/derive_props/fail.rs");
}
26.875
49
0.669767
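The `rustversion` gate pins the UI test to one stable compiler so that trybuild's recorded stderr snapshots don't churn when rustc's diagnostics change. The same harness shape extends to any directory of UI tests; the paths below are hypothetical, not files in this repository.

#[allow(dead_code)]
#[rustversion::attr(stable(1.45), test)]
fn ui_tests() {
    let t = trybuild::TestCases::new();
    t.pass("tests/ui/ok.rs");
    t.compile_fail("tests/ui/bad.rs");
}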
69ecdaf0350fa779b0cd4a994f0713aa049cb0d5
5,874
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ core_mempool::{CoreMempool, TimelineState, TxnPointer}, proto::mempool::Mempool, OP_COUNTERS, }; use futures::Future; use grpc_helpers::{create_grpc_invalid_arg_status, default_reply_error_logger}; use libra_logger::prelude::*; use libra_metrics::counters::SVC_COUNTERS; use libra_types::{ account_address::AccountAddress, proto::types::SignedTransactionsBlock, transaction::SignedTransaction, }; use std::{ cmp, collections::HashSet, convert::TryFrom, sync::{Arc, Mutex}, time::Duration, }; #[derive(Clone)] pub(crate) struct MempoolService { pub(crate) core_mempool: Arc<Mutex<CoreMempool>>, } impl Mempool for MempoolService { fn add_transaction_with_validation( &mut self, ctx: ::grpcio::RpcContext<'_>, req: crate::proto::mempool::AddTransactionWithValidationRequest, sink: ::grpcio::UnarySink<crate::proto::mempool::AddTransactionWithValidationResponse>, ) { trace!("[GRPC] Mempool::add_transaction_with_validation"); let _timer = SVC_COUNTERS.req(&ctx); let mut success = true; let proto_transaction = req.transaction.unwrap_or_else(Default::default); match SignedTransaction::try_from(proto_transaction) { Err(e) => { success = false; ctx.spawn( sink.fail(create_grpc_invalid_arg_status( "add_transaction_with_validation", e, )) .map_err(default_reply_error_logger), ); } Ok(transaction) => { let insertion_result = self .core_mempool .lock() .expect("[add txn] acquire mempool lock") .add_txn( transaction, req.max_gas_cost, req.latest_sequence_number, req.account_balance, TimelineState::NotReady, ); let mut response = crate::proto::mempool::AddTransactionWithValidationResponse::default(); response.status = Some(insertion_result.into()); ctx.spawn(sink.success(response).map_err(default_reply_error_logger)) } } SVC_COUNTERS.resp(&ctx, success); } fn get_block( &mut self, ctx: ::grpcio::RpcContext<'_>, req: super::proto::mempool::GetBlockRequest, sink: ::grpcio::UnarySink<super::proto::mempool::GetBlockResponse>, ) { trace!("[GRPC] Mempool::get_block"); let _timer = SVC_COUNTERS.req(&ctx); let block_size = cmp::max(req.max_block_size, 1); OP_COUNTERS.inc_by("get_block.requested", block_size as usize); let exclude_transactions: HashSet<TxnPointer> = req .transactions .iter() .map(|t| (AccountAddress::try_from(&t.sender[..]), t.sequence_number)) .filter(|(address, _)| address.is_ok()) .map(|(address, seq)| (address.unwrap(), seq)) .collect(); let mut txns = self .core_mempool .lock() .expect("[get_block] acquire mempool lock") .get_block(block_size, exclude_transactions); let transactions = txns.drain(..).map(SignedTransaction::into).collect(); let mut block = SignedTransactionsBlock::default(); block.transactions = transactions; OP_COUNTERS.inc_by("get_block.returned", block.transactions.len()); let mut response = crate::proto::mempool::GetBlockResponse::default(); response.block = Some(block); ctx.spawn(sink.success(response).map_err(default_reply_error_logger)); SVC_COUNTERS.resp(&ctx, true); } fn commit_transactions( &mut self, ctx: ::grpcio::RpcContext<'_>, req: crate::proto::mempool::CommitTransactionsRequest, sink: ::grpcio::UnarySink<crate::proto::mempool::CommitTransactionsResponse>, ) { trace!("[GRPC] Mempool::commit_transaction"); let _timer = SVC_COUNTERS.req(&ctx); OP_COUNTERS.inc_by("commit_transactions.requested", req.transactions.len()); let mut pool = self .core_mempool .lock() .expect("[update status] acquire mempool lock"); for transaction in &req.transactions { if let Ok(address) = 
AccountAddress::try_from(&transaction.sender[..]) { let sequence_number = transaction.sequence_number; pool.remove_transaction(&address, sequence_number, transaction.is_rejected); } } let block_timestamp_usecs = req.block_timestamp_usecs; if block_timestamp_usecs > 0 { pool.gc_by_expiration_time(Duration::from_micros(block_timestamp_usecs)); } let response = crate::proto::mempool::CommitTransactionsResponse::default(); ctx.spawn(sink.success(response).map_err(default_reply_error_logger)); SVC_COUNTERS.resp(&ctx, true); } fn health_check( &mut self, ctx: ::grpcio::RpcContext<'_>, _req: crate::proto::mempool::HealthCheckRequest, sink: ::grpcio::UnarySink<crate::proto::mempool::HealthCheckResponse>, ) { trace!("[GRPC] Mempool::health_check"); let pool = self .core_mempool .lock() .expect("[health_check] acquire mempool lock"); let mut response = crate::proto::mempool::HealthCheckResponse::default(); response.is_healthy = pool.health_check(); ctx.spawn(sink.success(response).map_err(default_reply_error_logger)); } }
38.142857
95
0.601124
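The exclude-set construction in `get_block` silently drops requests whose sender bytes fail to parse rather than failing the call. The same try-filter-unwrap shape works with plain types; in this hypothetical `exclude_set`, `u32::try_from` stands in for `AccountAddress::try_from`.

use std::collections::HashSet;
use std::convert::TryFrom;

fn exclude_set(raw: &[(i64, u64)]) -> HashSet<(u32, u64)> {
    raw.iter()
        .map(|&(sender, seq)| (u32::try_from(sender), seq))
        // Entries with an unparseable sender are ignored, mirroring get_block.
        .filter(|(addr, _)| addr.is_ok())
        .map(|(addr, seq)| (addr.unwrap(), seq))
        .collect()
}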
de08f19aa72625fd23e2b71287abcad69ab74583
8,141
//! Used to create queries of type [`ReadQuery`](crate::query::read_query::ReadQuery) or
//! [`WriteQuery`](crate::query::write_query::WriteQuery) which can be executed in InfluxDB
//!
//! # Examples
//!
//! ```rust
//! use influxdb::{Query, Timestamp};
//! use influxdb::InfluxDbWriteable;
//!
//! let write_query = Timestamp::Nanoseconds(0).into_query("measurement")
//!     .add_field("field1", 5)
//!     .add_tag("author", "Gero")
//!     .build();
//!
//! assert!(write_query.is_ok());
//!
//! let read_query = Query::raw_read_query("SELECT * FROM weather")
//!     .build();
//!
//! assert!(read_query.is_ok());
//! ```

use chrono::prelude::{DateTime, TimeZone, Utc};
use std::convert::TryInto;

pub mod consts;
mod line_proto_term;
pub mod read_query;
pub mod write_query;

use std::fmt;

use crate::{Error, ReadQuery, WriteQuery};
use consts::{MILLIS_PER_SECOND, MINUTES_PER_HOUR, NANOS_PER_MILLI, SECONDS_PER_MINUTE};

#[cfg(feature = "derive")]
pub use influxdb_derive::InfluxDbWriteable;

#[derive(PartialEq, Debug, Copy, Clone)]
pub enum Timestamp {
    Nanoseconds(u128),
    Microseconds(u128),
    Milliseconds(u128),
    Seconds(u128),
    Minutes(u128),
    Hours(u128),
}

impl fmt::Display for Timestamp {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use Timestamp::*;
        match self {
            Nanoseconds(ts) | Microseconds(ts) | Milliseconds(ts) | Seconds(ts) | Minutes(ts)
            | Hours(ts) => write!(f, "{}", ts),
        }
    }
}

impl From<Timestamp> for DateTime<Utc> {
    fn from(ts: Timestamp) -> DateTime<Utc> {
        match ts {
            Timestamp::Hours(h) => {
                let nanos =
                    h * MINUTES_PER_HOUR * SECONDS_PER_MINUTE * MILLIS_PER_SECOND * NANOS_PER_MILLI;
                Utc.timestamp_nanos(nanos.try_into().unwrap())
            }
            Timestamp::Minutes(m) => {
                let nanos = m * SECONDS_PER_MINUTE * MILLIS_PER_SECOND * NANOS_PER_MILLI;
                Utc.timestamp_nanos(nanos.try_into().unwrap())
            }
            Timestamp::Seconds(s) => {
                let nanos = s * MILLIS_PER_SECOND * NANOS_PER_MILLI;
                Utc.timestamp_nanos(nanos.try_into().unwrap())
            }
            Timestamp::Milliseconds(millis) => {
                let nanos = millis * NANOS_PER_MILLI;
                Utc.timestamp_nanos(nanos.try_into().unwrap())
            }
            Timestamp::Nanoseconds(nanos) => Utc.timestamp_nanos(nanos.try_into().unwrap()),
            Timestamp::Microseconds(mis) => {
                // 1 microsecond = 1,000 nanoseconds (the previous `mis / 10000` was a bug)
                let nanos = mis * 1_000;
                Utc.timestamp_nanos(nanos.try_into().unwrap())
            }
        }
    }
}

impl<T> From<DateTime<T>> for Timestamp
where
    T: TimeZone,
{
    fn from(date_time: DateTime<T>) -> Self {
        Timestamp::Nanoseconds(date_time.timestamp_nanos() as u128)
    }
}

pub trait Query {
    /// Builds valid InfluxSQL which can be run against the Database.
    /// In case no fields have been specified, it will return an error,
    /// as that is invalid InfluxSQL syntax.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use influxdb::{Query, Timestamp};
    /// use influxdb::InfluxDbWriteable;
    ///
    /// let invalid_query = Timestamp::Nanoseconds(0).into_query("measurement").build();
    /// assert!(invalid_query.is_err());
    ///
    /// let valid_query = Timestamp::Nanoseconds(0).into_query("measurement").add_field("myfield1", 11).build();
    /// assert!(valid_query.is_ok());
    /// ```
    fn build(&self) -> Result<ValidQuery, Error>;

    fn get_type(&self) -> QueryType;
}

pub trait InfluxDbWriteable {
    fn into_query<I: Into<String>>(self, name: I) -> WriteQuery;
}

impl InfluxDbWriteable for Timestamp {
    fn into_query<I: Into<String>>(self, name: I) -> WriteQuery {
        WriteQuery::new(self, name.into())
    }
}

impl dyn Query {
    /// Returns a [`ReadQuery`](crate::ReadQuery) builder.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use influxdb::Query;
    ///
    /// Query::raw_read_query("SELECT * FROM weather"); // Is of type [`ReadQuery`](crate::ReadQuery)
    /// ```
    pub fn raw_read_query<S>(read_query: S) -> ReadQuery
    where
        S: Into<String>,
    {
        ReadQuery::new(read_query)
    }
}

#[derive(Debug)]
#[doc(hidden)]
pub struct ValidQuery(String);

impl ValidQuery {
    pub fn get(self) -> String {
        self.0
    }
}

impl<T> From<T> for ValidQuery
where
    T: Into<String>,
{
    fn from(string: T) -> Self {
        Self(string.into())
    }
}

impl PartialEq<String> for ValidQuery {
    fn eq(&self, other: &String) -> bool {
        &self.0 == other
    }
}

impl PartialEq<&str> for ValidQuery {
    fn eq(&self, other: &&str) -> bool {
        &self.0 == other
    }
}

/// Internal Enum used to decide if a `POST` or `GET` request should be sent to InfluxDB. See [InfluxDB Docs](https://docs.influxdata.com/influxdb/v1.7/tools/api/#query-http-endpoint).
#[derive(PartialEq, Debug)]
pub enum QueryType {
    ReadQuery,
    /// write query with precision
    WriteQuery(String),
}

#[cfg(test)]
mod tests {
    use super::consts::{
        MILLIS_PER_SECOND, MINUTES_PER_HOUR, NANOS_PER_MILLI, SECONDS_PER_MINUTE,
    };
    use crate::query::{Timestamp, ValidQuery};
    use chrono::prelude::{DateTime, TimeZone, Utc};
    use std::convert::TryInto;

    #[test]
    fn test_equality_str() {
        assert_eq!(ValidQuery::from("hello"), "hello");
    }

    #[test]
    fn test_equality_string() {
        assert_eq!(
            ValidQuery::from(String::from("hello")),
            String::from("hello")
        );
    }

    #[test]
    fn test_format_for_timestamp_else() {
        assert!(format!("{}", Timestamp::Nanoseconds(100)) == "100");
    }

    #[test]
    fn test_chrono_datetime_from_timestamp_hours() {
        let datetime_from_timestamp: DateTime<Utc> = Timestamp::Hours(2).into();
        assert_eq!(
            Utc.timestamp_nanos(
                (2 * MINUTES_PER_HOUR * SECONDS_PER_MINUTE * MILLIS_PER_SECOND * NANOS_PER_MILLI)
                    .try_into()
                    .unwrap()
            ),
            datetime_from_timestamp
        )
    }

    #[test]
    fn test_chrono_datetime_from_timestamp_minutes() {
        let datetime_from_timestamp: DateTime<Utc> = Timestamp::Minutes(2).into();
        assert_eq!(
            Utc.timestamp_nanos(
                (2 * SECONDS_PER_MINUTE * MILLIS_PER_SECOND * NANOS_PER_MILLI)
                    .try_into()
                    .unwrap()
            ),
            datetime_from_timestamp
        )
    }

    #[test]
    fn test_chrono_datetime_from_timestamp_seconds() {
        let datetime_from_timestamp: DateTime<Utc> = Timestamp::Seconds(2).into();
        assert_eq!(
            Utc.timestamp_nanos(
                (2 * MILLIS_PER_SECOND * NANOS_PER_MILLI)
                    .try_into()
                    .unwrap()
            ),
            datetime_from_timestamp
        )
    }

    #[test]
    fn test_chrono_datetime_from_timestamp_millis() {
        let datetime_from_timestamp: DateTime<Utc> = Timestamp::Milliseconds(2).into();
        assert_eq!(
            Utc.timestamp_nanos((2 * NANOS_PER_MILLI).try_into().unwrap()),
            datetime_from_timestamp
        )
    }

    #[test]
    fn test_chrono_datetime_from_timestamp_nanos() {
        let datetime_from_timestamp: DateTime<Utc> = Timestamp::Nanoseconds(1).into();
        assert_eq!(Utc.timestamp_nanos(1), datetime_from_timestamp)
    }

    #[test]
    fn test_chrono_datetime_from_timestamp_micros() {
        // 1 microsecond is 1,000 nanoseconds; this matches the corrected
        // `Timestamp::Microseconds` conversion above.
        let datetime_from_timestamp: DateTime<Utc> = Timestamp::Microseconds(1).into();
        assert_eq!(Utc.timestamp_nanos(1_000), datetime_from_timestamp)
    }

    #[test]
    fn test_timestamp_from_chrono_date() {
        let timestamp_from_datetime: Timestamp = Utc.ymd(1970, 1, 1).and_hms(0, 0, 1).into();
        assert_eq!(
            Timestamp::Nanoseconds(MILLIS_PER_SECOND * NANOS_PER_MILLI),
            timestamp_from_datetime
        )
    }
}
30.151852
184
0.595013
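A quick round-trip check of the unit arithmetic in both `From` impls above, including the corrected microsecond case (1 microsecond must come back as 1,000 nanoseconds):

#[test]
fn test_timestamp_roundtrip_units() {
    let dt: DateTime<Utc> = Timestamp::Microseconds(1).into();
    assert_eq!(Utc.timestamp_nanos(1_000), dt);

    // DateTime -> Timestamp always lands in the nanosecond variant.
    let ts: Timestamp = Utc.timestamp_nanos(1_000).into();
    assert_eq!(Timestamp::Nanoseconds(1_000), ts);
}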
f83386728f204284c0163496253c369c72426aed
58,243
//! Everything related to the [`DirectedEdgeMesh`]. // # Some notes for developers about this implementation // // - The twin half edges are stored implicitly: twins are always stored next to // one another in the underlying vector and thus always have handle indices // only one apart. Furthermore, since we start with the handle index 0, the // indices of two twins are always 2k and 2k + 1 where k is an integer. // - We map edge handles to half edge handles by multiplying by two. Half edge // to edge is integer division by two. This works out very nicely: the edge // handle space is contiguous and the conversion operations are a simple // shift. use std::{ fmt, marker::PhantomData, ops, }; use optional::Optioned as Opt; use crate::{ prelude::*, handle::{hsize, Handle}, map::{DenseMap, set::DenseSet}, mesh::SplitEdgeWithFacesResult, traits::marker::{Bool, False, TriFaces, True}, }; use super::{Checked, TypeOpt}; use self::adj::{CwVertexCirculator, CwVertexCirculatorState}; mod adj; #[cfg(test)] mod tests; const NON_MANIFOLD_EDGE_ERR: &str = "new face would add a non-manifold edge"; // =============================================================================================== // ===== Compile time configuration of DirectedEdgeMesh // =============================================================================================== /// Compile-time configuration for [`DirectedEdgeMesh`]. /// /// To configure a directed edge mesh, either use [`DefaultConfig`], or create /// your own (preferably inhabitable) type and implement this trait. pub trait Config: 'static { /// Specifies whether a `next` handle is stored per directed edge. This is /// usually not necessary as the handle can be obtained by a simple /// calculation. /// /// TODO: check in benchmarks! type StoreNext: Bool; /// Specifies whether a `prev` handle is stored per directed edge. This is /// usually not necessary as the handle can be obtained by a simple /// calculation. /// /// TODO: check in benchmarks! type StorePrev: Bool; // TODO: // - allow multi fan blades? // - source/target vertex } /// The standard configuration for the directed edge mesh. #[allow(missing_debug_implementations)] pub enum DefaultConfig {} impl Config for DefaultConfig { type StoreNext = False; type StorePrev = False; } // =============================================================================================== // ===== HalfEdgeHandle and handle helper // =============================================================================================== /// Handle to refer to half edges. #[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] struct HalfEdgeHandle(hsize); impl Handle for HalfEdgeHandle { #[inline(always)] fn new(id: hsize) -> Self { HalfEdgeHandle(id) } #[inline(always)] fn idx(&self) -> hsize { self.0 } } impl HalfEdgeHandle { #[inline(always)] fn first_around(fh: FaceHandle) -> Self { Self::new(fh.idx() * 3) } #[inline(always)] fn face(&self) -> FaceHandle { FaceHandle::new(self.idx() / 3) } } impl fmt::Debug for HalfEdgeHandle { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "HE{}", self.idx()) } } /// An implementation of the *directed edge mesh*. This is sometimes described /// as "memory efficient version of the half edge mesh for triangle meshes". /// /// This data structure stores information in directed edges which are stored /// per face (each face has exactly three). Each directed edge stores its twin /// directed edge and its target vertex. 
The `next` and `prev` handles to
/// circulate around a face are typically not stored but given implicitly by the
/// memory location of the directed edge: all three directed edges of a face
/// are stored contiguously in memory.
///
/// The literature mentions that it's not trivial to handle the boundary of a
/// mesh with this data structure. However, there doesn't seem to be a standard
/// way to actually implement boundary operations in this data structure.
/// As such, this implementation uses one solution that works for now, but this
/// might get changed in the future.
///
///
/// # References
///
/// Initially introduced in: Campagna, Swen, Leif Kobbelt, and Hans-Peter
/// Seidel. "Directed edges—A scalable representation for triangle meshes."
/// Journal of Graphics Tools 3.4 (1998): 1-11.
#[derive(Empty)]
pub struct DirectedEdgeMesh<C: Config = DefaultConfig> {
    vertices: DenseMap<VertexHandle, Vertex>,
    half_edges: DenseMap<HalfEdgeHandle, HalfEdge<C>>,
    _config: PhantomData<C>,
}

/// Data stored per `Vertex`.
#[derive(Clone, Copy)]
pub(crate) struct Vertex {
    /// Handle of one outgoing half edge. If the vertex is a boundary vertex,
    /// this points to one of the boundary half edges.
    outgoing: Opt<Checked<HalfEdgeHandle>>,
}

/// The type of the `twin` field, which has a special encoding.
///
/// If the edge a half edge belongs to is interior, its `twin` field simply
/// points to the other half edge of that edge. If however, the edge is a
/// boundary edge, things get complicated.
///
/// The original paper suggests storing -1 to indicate a boundary edge and -2
/// to indicate a non-manifold edge. Since this library does not support
/// non-manifold edges anyway, we don't need the -2 sentinel value. The
/// original paper also does not describe a way to iterate along the boundary
/// of a mesh. We can improve this by using this field to store the next
/// boundary edge (kind of).
///
/// Let's take a look at the following mesh: edges a to d are boundary
/// edges.
///
/// ```text
///    a       b       c       d
///  ----- o ----- o ----- o -----
///       / \     / \     / \
///      /   \   /   \   /   \
///     /     \ /     \ /     \
///            o       o
///               ...
/// ```
///
/// Boundary *half* edges are never stored, but the inner halves of a to d
/// are stored. Since they belong to a boundary edge, their `twin` field
/// does not point to a twin! Instead, the handle's most significant bit
/// (sign bit) is set and the remaining bits form a handle which points to
/// the half edge of the "next" boundary edge. Here, "next" is defined like
/// the next around faces: counter clock-wise. Note that "counter
/// clock-wise" makes intuitive sense when talking about holes in the mesh,
/// but is "intuitively" inverted when talking about outer boundaries.
///
/// In this case, `a.twin` has its sign bit set and points to `b`. `b` to
/// `c` and so on.
///
/// As this type deals with a lot of `Checked<HalfEdgeHandle>`s, many functions
/// contain `unsafe`. This is always fine as long as the user creates instances
/// of this type only from valid handles. But as the constructors already take
/// `Checked` handles, we know those are valid.
///
/// TODO: Think about maybe not using the first bit, but instead comparing this
/// number to the length of the half edge vector. That way we can utilize the
/// whole handle space.
#[derive(Clone, Copy, PartialEq)]
struct EncodedTwin(hsize);

const TWIN_MASK: hsize = 1 << (std::mem::size_of::<hsize>() * 8 - 1);

impl EncodedTwin {
    /// Returns a dummy value that has to be overwritten!
#[inline(always)] unsafe fn dummy() -> Self { Self(0) } #[inline(always)] fn next_boundary_he(he: Checked<HalfEdgeHandle>) -> Self { Self(he.idx() | TWIN_MASK) } #[inline(always)] fn twin(he: Checked<HalfEdgeHandle>) -> Self { Self(he.idx()) } /// Decode it into the easier to use form. fn decode(&self) -> Twin { if self.is_real_twin() { // See type documentation on `unsafe` unsafe { Twin::Twin(Checked::new(HalfEdgeHandle::new(self.0))) } } else { // See type documentation on `unsafe` unsafe { Twin::NextBoundaryHe(Checked::new(HalfEdgeHandle::new(self.0 & !TWIN_MASK))) } } } fn or_next_boundary_he(&self) -> Checked<HalfEdgeHandle> { // See type documentation on `unsafe` unsafe { Checked::new(HalfEdgeHandle::new(self.0 & !TWIN_MASK)) } } /// Create a new encoded twin that is always a "next boundary half edge" /// and uses the half edge that is stored in `self`. The purpose of the /// half edge in `self` is ignored. fn to_next_boundary_he(&self) -> EncodedTwin { Self(self.0 | TWIN_MASK) } #[inline(always)] fn is_real_twin(&self) -> bool { self.0 & TWIN_MASK == 0 } fn as_real_twin(&self) -> Option<Checked<HalfEdgeHandle>> { if self.is_real_twin() { // See type documentation on `unsafe` unsafe { Some(Checked::new(HalfEdgeHandle::new(self.0))) } } else { None } } fn as_next_boundary_he(&self) -> Option<Checked<HalfEdgeHandle>> { if self.is_real_twin() { None } else { // See type documentation on `unsafe` unsafe { Some(Checked::new(HalfEdgeHandle::new(self.0 & !TWIN_MASK))) } } } } #[derive(Clone, Copy)] enum Twin { Twin(Checked<HalfEdgeHandle>), NextBoundaryHe(Checked<HalfEdgeHandle>), } /// Data stored per half edge. pub(crate) struct HalfEdge<C: Config> { /// The specially encoded twin. See [`EncodedTwin`] for more information. twin: EncodedTwin, /// The vertex this half edge points to. target: Checked<VertexHandle>, /// Points to the next half edge around the face. This is always equal to /// `(self_id / 3) * 3 + (self_id + 1) % 3` where `self_id` is the id of /// the handle of this half edge. next: TypeOpt<Checked<HalfEdgeHandle>, C::StoreNext>, /// Points to the previous half edge around the face. This is always equal /// to `(self_id / 3) * 3 + (self_id + 2) % 3` where `self_id` is the id of /// the handle of this half edge. prev: TypeOpt<Checked<HalfEdgeHandle>, C::StorePrev>, } impl<C: Config> HalfEdge<C> { /// Returns an instance with only `target` initialized to the given value /// while all other fields contain dummy values. These fields need to be /// overwritten since they contain bogus information. 
unsafe fn dummy_to(target: Checked<VertexHandle>) -> Self { Self { twin: EncodedTwin::dummy(), target, next: Checked::new(HalfEdgeHandle::new(0)).into(), prev: Checked::new(HalfEdgeHandle::new(0)).into(), } } fn is_boundary(&self) -> bool { !self.twin.is_real_twin() } } impl<C: Config> fmt::Debug for DirectedEdgeMesh<C> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("DirectedEdgeMesh") .field("vertices", &self.vertices) .field("half_edges", &self.half_edges) .finish() } } impl<C: Config> Clone for DirectedEdgeMesh<C> { fn clone(&self) -> Self { Self { vertices: self.vertices.clone(), half_edges: self.half_edges.clone(), _config: PhantomData, } } } impl fmt::Debug for Vertex { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Vertex {{ outgoing: {:?} }}", self.outgoing) } } impl fmt::Debug for EncodedTwin { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.decode().fmt(f) } } impl fmt::Debug for Twin { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Twin::Twin(h) => write!(f, "Twin({:?})", h), Twin::NextBoundaryHe(h) => write!(f, "NextB({:?})", h), } } } impl<C: Config> Copy for HalfEdge<C> {} impl<C: Config> Clone for HalfEdge<C> { fn clone(&self) -> Self { Self { twin: self.twin.clone(), target: self.target.clone(), next: self.next.clone(), prev: self.prev.clone(), } } } impl<C: Config> fmt::Debug for HalfEdge<C> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let next = self.next.into_option() .map(|next| format!(" next: {:6}", format!("{:?},", next))) .unwrap_or("".into()); let prev = self.prev.into_option() .map(|prev| format!(" prev: {:6}", format!("{:?},", prev))) .unwrap_or("".into()); write!( f, "HalfEdge {{ target: {:5}{}{} twin: {:?} }}", format!("{:?},", self.target), next, prev, self.twin, ) } } // =============================================================================================== // ===== Internal helper methods // =============================================================================================== impl<C: Config> DirectedEdgeMesh<C> { /// Makes sure the given handle points to an existing element. If that's /// not the case, this method panics. fn check_vertex(&self, vh: VertexHandle) -> Checked<VertexHandle> { if self.vertices.contains_handle(vh) { // We just checked `vh` is valid, so `Checked::new` is correct unsafe { Checked::new(vh) } } else { panic!( "{:?} was passed to a directed edge mesh, but this vertex does not \ exist in this mesh", vh, ); } } fn check_face(&self, fh: FaceHandle) { let heh = HalfEdgeHandle::first_around(fh); if !self.half_edges.contains_handle(heh) { panic!( "{:?} was passed to a directed edge mesh, but this face does not \ exist in this mesh", fh, ); } } /// Makes sure the given handle points to an existing element. If that's /// not the case, this method panics. fn checked_half_edges_around(&self, fh: FaceHandle) -> [Checked<HalfEdgeHandle>; 3] { let heh = HalfEdgeHandle::first_around(fh); if self.half_edges.contains_handle(heh) { // These handles actually exist due to the basic way this data // structure works. For each face three half edges are stored. unsafe { [ Checked::new(heh), Checked::new(HalfEdgeHandle::new(heh.idx() + 1)), Checked::new(HalfEdgeHandle::new(heh.idx() + 2)), ] } } else { panic!( "{:?} was passed to a directed edge mesh, but this face does not \ exist in this mesh", fh, ); } } /// Tries to find the half edge from `from` to `to`. Returns `None` if /// there is no edge between the two vertices. 
    fn he_between(
        &self,
        from: Checked<VertexHandle>,
        to: Checked<VertexHandle>,
    ) -> Option<Checked<HalfEdgeHandle>> {
        self.circulate_around_vertex(from)
            .find(|&outgoing| self[outgoing].target == to)
    }

    /// Returns an iterator that circulates around the vertex `center`. The
    /// iterator yields outgoing half edges.
    fn circulate_around_vertex(&self, center: Checked<VertexHandle>) -> CwVertexCirculator<'_, C> {
        let state = match self[center].outgoing.into_option() {
            None => CwVertexCirculatorState::Empty,
            Some(start_he) => CwVertexCirculatorState::NonEmpty {
                current_he: start_he,
                start_he,
            },
        };

        CwVertexCirculator { mesh: self, state }
    }

    fn next_he(&self, he: Checked<HalfEdgeHandle>) -> Checked<HalfEdgeHandle> {
        if let Some(next) = self[he].next.into_option() {
            return next;
        }

        // The first HE of the face is `(he.idx() / 3) * 3`. If the given HE is
        // the first or second half edge of the face, we just need to add one.
        // If it's the third HE of the face, we need to subtract 2. But how to
        // check if it's the third HE? `(id + 1) % 3 == 0` does that.
        let idx = he.idx() + 1;
        let next = if is_divisible_by_3(idx) { idx - 3 } else { idx };

        // This handle actually exists by the logic above. Three half edges
        // always exist together.
        unsafe { Checked::new(HalfEdgeHandle::new(next)) }
    }

    fn prev_he(&self, he: Checked<HalfEdgeHandle>) -> Checked<HalfEdgeHandle> {
        if let Some(prev) = self[he].prev.into_option() {
            return prev;
        }

        // See `next_he` for explanation on this code.
        let prev = if is_divisible_by_3(he.idx()) {
            he.idx() + 2
        } else {
            he.idx() - 1
        };

        // This handle actually exists by the logic above. Three half edges
        // always exist together.
        unsafe { Checked::new(HalfEdgeHandle::new(prev)) }
    }

    /// Sets the `twin` handle of both half edges to each other.
    fn set_twins(&mut self, a: Checked<HalfEdgeHandle>, b: Checked<HalfEdgeHandle>) {
        self[a].twin = EncodedTwin::twin(b);
        self[b].twin = EncodedTwin::twin(a);
    }

    /// Pushes the three given half edges (in order) and returns their handles.
    fn push_half_edge_triple(
        &mut self,
        [a, b, c]: [HalfEdge<C>; 3],
    ) -> [Checked<HalfEdgeHandle>; 3] {
        // The handles are clearly valid as we just pushed them.
        let ah = unsafe { Checked::new(self.half_edges.push(a)) };
        let bh = unsafe { Checked::new(self.half_edges.push(b)) };
        let ch = unsafe { Checked::new(self.half_edges.push(c)) };

        self[ah].next = bh.into();
        self[bh].next = ch.into();
        self[ch].next = ah.into();
        self[ah].prev = ch.into();
        self[bh].prev = ah.into();
        self[ch].prev = bh.into();

        [ah, bh, ch]
    }
}

/// Returns `true` if and only if `idx` is divisible by 3. Basically
/// `idx % 3 == 0` but hand-optimized.
///
/// Unfortunately, LLVM is not smart enough to correctly optimize that code.
/// That's why this is hand-micro-optimized. The divisibility check is
/// well-known and described for example here:
/// http://clomont.com/efficient-divisibility-testing/
#[inline(always)]
fn is_divisible_by_3(idx: hsize) -> bool {
    // We have to do different things depending on the handle size. The
    // argument types of the inner functions are `u32` and `u64` to assure this
    // function is correct.
    #[cfg(not(feature = "large-handle"))]
    #[inline(always)]
    fn check(idx: u32) -> bool {
        idx.wrapping_mul(0xaaaaaaab) <= 0x55555555
    }

    #[cfg(feature = "large-handle")]
    #[inline(always)]
    fn check(idx: u64) -> bool {
        idx.wrapping_mul(0xaaaaaaaaaaaaaaab) <= 0x5555555555555555
    }

    check(idx)
}

macro_rules!
impl_index { ($handle:ident, $field:ident, $c:ident, $out:ty) => { impl<$c: Config> ops::Index<Checked<$handle>> for DirectedEdgeMesh<$c> { type Output = $out; #[inline(always)] fn index(&self, idx: Checked<$handle>) -> &Self::Output { // &self.$field[*idx] unsafe { self.$field.get_unchecked(*idx) } } } impl<$c: Config> ops::IndexMut<Checked<$handle>> for DirectedEdgeMesh<$c> { #[inline(always)] fn index_mut(&mut self, idx: Checked<$handle>) -> &mut Self::Output { // &mut self.$field[*idx] unsafe { self.$field.get_unchecked_mut(*idx) } } } } } impl_index!(VertexHandle, vertices, C, Vertex); impl_index!(HalfEdgeHandle, half_edges, C, HalfEdge<C>); // =============================================================================================== // ===== Mesh trait implementations // =============================================================================================== impl<C: Config> Mesh for DirectedEdgeMesh<C> { type FaceKind = TriFaces; type Orientable = True; fn num_vertices(&self) -> hsize { self.vertices.num_elements() } fn next_vertex_handle_from(&self, start: VertexHandle) -> Option<VertexHandle> { // TODO: optimize (start.idx()..self.vertices.next_push_handle().idx()) .map(VertexHandle::new) .find(|&vh| self.vertices.contains_handle(vh)) } fn next_face_handle_from(&self, start: FaceHandle) -> Option<FaceHandle> { // TODO: optimize (start.idx()..self.half_edges.next_push_handle().idx() / 3) .map(|i| HalfEdgeHandle::new(i * 3)) .find(|&heh| self.half_edges.contains_handle(heh)) .map(|he| he.face()) } fn last_vertex_handle(&self) -> Option<VertexHandle> { self.vertices.last_handle() } fn last_face_handle(&self) -> Option<FaceHandle> { self.half_edges.last_handle().map(|he| he.face()) } fn contains_vertex(&self, vertex: VertexHandle) -> bool { self.vertices.contains_handle(vertex) } fn num_faces(&self) -> hsize { self.half_edges.num_elements() / 3 } fn contains_face(&self, face: FaceHandle) -> bool { self.half_edges.contains_handle(HalfEdgeHandle::first_around(face)) } fn num_edges(&self) -> hsize where Self: EdgeMesh { unreachable!() } fn next_edge_handle_from(&self, _: EdgeHandle) -> Option<EdgeHandle> where Self: EdgeMesh { unreachable!() } fn last_edge_handle(&self) -> Option<EdgeHandle> where Self: EdgeMesh { unreachable!() } fn check_integrity(&self) { if self.half_edges.num_elements() % 3 != 0 { panic!("bug: number of half edges not divisible by 3"); } // Check `outgoing` handle of vertices for (vh, v) in self.vertices.iter() { if let Some(outgoing) = v.outgoing.into_option() { // Make sure outgoing handles are valid if !self.half_edges.contains_handle(*outgoing) { panic!( "bug (broken reference): [{:?}].outgoing = Some({:?}), but that \ half edge does not exist!", vh, outgoing, ); } // Check `outgoing <-> target` connection if let Some(incoming) = self[outgoing].twin.as_real_twin() { if *self[incoming].target != vh { panic!( "bug: [{:?}].outgoing = Some({:?}), but [{:?}].twin = {:?} \ and [{:?}].target = {:?} (should be {:?})", vh, *outgoing, *outgoing, *incoming, *incoming, *self[incoming].target, vh, ); } } } } // Check half edges for fh in self.face_handles() { let [heh0, heh1, heh2] = self.checked_half_edges_around(fh); for &heh in &[heh0, heh1, heh2] { // Make sure all exist if !self.half_edges.contains_handle(*heh) { panic!( "bug: {:?} (returned by `checked_half_edges_around({:?})`) \ does not exist in `self.half_edges`", heh, fh, ); } let he = self[heh]; // Make sure target vertex is not isolated if self[he.target].outgoing.is_none() { panic!( "bug: 
[{:?}].target = {:?}, but [{:?}].outgoing = None", heh, he.target, he.target, ); } // Make sure `twin` handles match match he.twin.decode() { Twin::Twin(twin) => { if self[twin].twin != EncodedTwin::twin(heh) { panic!( "bug: [{:?}].twin = {:?}, but [{:?}].twin = {:?}", heh, he.twin, twin, self[twin].twin, ); } } Twin::NextBoundaryHe(next) => { if !self.half_edges.contains_handle(*next) { panic!( "bug: [{:?}].twin = {:?}, but {:?} does not exist", heh, he.twin, next, ); } } } } macro_rules! check_next_prev { ($prev:ident -> $next:ident) => { if let Some(next) = self[$prev].next.into_option() { if next != $next { panic!( "[{:?}].next = {:?}, but should be {:?}", $prev, next, $next, ); } } if let Some(prev) = self[$next].prev.into_option() { if prev != $prev { panic!( "[{:?}].prev = {:?}, but should be {:?}", $next, prev, $prev, ); } } }; } check_next_prev!(heh0 -> heh1); check_next_prev!(heh1 -> heh2); check_next_prev!(heh2 -> heh0); } // Walk around boundaries. let mut visited = DenseSet::with_capacity(self.half_edges.num_elements()); for (start, he) in self.half_edges.iter() { if visited.contains_handle(start) { continue; } if he.is_boundary() { let mut heh = start; loop { // No half edge in this cycle should have been visited yet! if visited.contains_handle(heh) { panic!( "bug: encountered {:?} while iterating on boundary starting \ from {:?}, but we already visited it!", heh, start, ); } visited.insert(heh); heh = match self.half_edges[heh].twin.decode() { Twin::Twin(_) => { panic!( "bug: encountered {:?} while iterating on boundary starting \ from {:?}, but [{:?}].twin = {:?} (should be pointing \ to the next boundary edge)!", heh, start, heh, self.half_edges[heh].twin, ); } Twin::NextBoundaryHe(next) => *next, }; if heh == start { break; } } } } } } impl<C: Config> MeshMut for DirectedEdgeMesh<C> { fn add_vertex(&mut self) -> VertexHandle { self.vertices.push(Vertex { outgoing: Opt::none() }) } fn add_triangle(&mut self, [a, b, c]: [VertexHandle; 3]) -> FaceHandle { assert_ne!(a, b, "vertices of new face are not unique"); assert_ne!(a, c, "vertices of new face are not unique"); assert_ne!(b, c, "vertices of new face are not unique"); let vertices = [self.check_vertex(a), self.check_vertex(b), self.check_vertex(c)]; let outer_hes = [ self.he_between(vertices[1], vertices[0]), self.he_between(vertices[2], vertices[1]), self.he_between(vertices[0], vertices[2]), ]; // Add three new half edges. // // The `dummy_to` is only safe if we override the `twin`, `next` and // `prev` handles and never use them to index the storage. Sadly, this // is not easy to see. All fields are certainly overwritten, but it's // more difficult to show that they are not accessed before. Well, the // correctness of this whole function is based on that. let inner_hes = self.push_half_edge_triple([ unsafe { HalfEdge::dummy_to(vertices[1]) }, unsafe { HalfEdge::dummy_to(vertices[2]) }, unsafe { HalfEdge::dummy_to(vertices[0]) }, ]); // Iterate over all corners of the new triangle for idx in 0..vertices.len() { let prev_idx = idx.checked_sub(1).unwrap_or(2); let vh = vertices[idx]; let incoming_inner = inner_hes[prev_idx]; let outgoing_inner = inner_hes[idx]; let incoming_outer = outer_hes[idx]; let outgoing_outer = outer_hes[prev_idx]; let v = &self[vh]; match (incoming_outer, outgoing_outer) { // None of the half edges exists: both are boundary edges. (None, None) => { if let Some(outgoing_from_v) = v.outgoing.into_option() { // More difficult case: we are creating a multi // fan-blade vertex here.
In order to correctly set the // encoded twin handles, we need to find the start of // some blade and the end of some blade. We will insert // the new blade between the two. // // // // \ ? ? ^ // \ / // start \ ? / outgoing_from_v // \ / // v / // (v) // / ^ // / \ // / \ // / F \ // v \ // ( ) ( ) // // Find the end edge of some blade. This is easy // because if a vertex is boundary, its `outgoing` edge // is a boundary edge. And since it's *outgoing* it is // the end of a blade. We need to make sure that it's a // boundary vertex, though! if let Some(start) = self[outgoing_from_v].twin.as_next_boundary_he() { // Insert new blade in between. self[outgoing_inner].twin = EncodedTwin::next_boundary_he(start); self[outgoing_from_v].twin = EncodedTwin::next_boundary_he(incoming_inner); // Regarding the `outgoing` field of `v`: before // adding this face, it was a boundary half edge. // Since we didn't add a face adjacent to it, it // still is. So we can keep it unchanged. } else { // In this case, `outgoing_from_v` was not a // boundary edge, meaning that the cycle around `v` // is already closed. That's not allowed! panic!( "new triangle {:?} would create non-manifold vertex (cycle \ around vertex {:?} already closed)", [a, b, c], v, ); } } else { // This is the easy case: `incoming` and `outgoing` are // the only edges adjacent to `v`. This also means that // `v` was isolated before and we now need to set its // `outgoing` handle. // // (v) // / ^ // / \ // / \ // / F \ // v \ // ( ) ( ) self[outgoing_inner].twin = EncodedTwin::next_boundary_he(incoming_inner); self[vh].outgoing = Opt::some(outgoing_inner); } } // The incoming half edge exists (adjacent to the face IF), but // the outgoing does not. We have to find the half edge // `before_new` whose `twin` handle points to `incoming_outer`. // Because that `twin` handle now needs to point to // `incoming_inner`. We also need to set the twin handles of // `incoming_outer` and `outgoing_inner`. // // ^ // ? ? / // ? / before_new // / // / // <-------- (v) // ^/ ^ // IF // \ // // \ // // F \ // /v \ // ( ) ( ) // // ^-- this face and // ^-- ^-- these edges are new // (Some(incoming_outer), None) => { // Find `before_new` let before_new = self.circulate_around_vertex(vh).find(|&outgoing| { self[outgoing].twin.as_next_boundary_he() == Some(incoming_outer) }).expect(NON_MANIFOLD_EDGE_ERR); // Update next boundary edge self[before_new].twin = EncodedTwin::next_boundary_he(incoming_inner); // Regarding the `outgoing` handle of the vertex: the only // old boundary edge that is not boundary anymore is // `incoming_outer`. But since this is an incoming edge, it // could not have been the `outgoing` one of the vertex. So // we don't need to change anything. } // The outgoing half edge exists (adjacent to the face OF), but // the incoming does not. The twin handle of `outgoing_outer` // points to some half edge `after_new`. `outgoing_inner.twin` // needs to point to that half edge now. Additionally, the two // new real twin handles need to be set. // // \ // \ ? // after_new \ ? // \ ? // v // (v)<--------- // / ^\ // / \\ OF // / \\ // / F \\ // v \v // ( ) ( ) // // ^-- this face and // ^-- ^-- these half edges are new // (None, Some(outgoing_outer)) => { // Move the boundary-next to the new half edge. self[outgoing_inner].twin = self[outgoing_outer].twin; // We need to update the `outgoing` handle of the vertex // here. It might have been `outgoing_outer` which is now // not a boundary edge anymore.
self[vh].outgoing = Opt::some(outgoing_inner); } // This can be easy or the ugliest case. The outer incoming and // outgoing half edge both exist, meaning there are faces on // either side. That means we are connecting two fan blades. If // the fan blade of `incoming` is already directly after the // fan blade of `outgoing` (speaking about the "circulate // around vertex" order), then everything is fine. // // ? // ? ? // // <-------- (v) <-------- // ^/ ^\ // IF // \\ OF // // \\ // // F \\ // /v \v // ( ) ( ) // // // BUT, if that is not the case, we need to change the order of // fan blades to match the "good" situation described above. // // Additionally, we might need to update `v.outgoing` because // it might have been `incoming.twin()` which is not a boundary // half edge anymore (after this method). (Some(incoming_outer), Some(outgoing_outer)) => { // Find the end of the blade containing IF. If there is // only one blade, the end is `outgoing_outer`. let ib_end = { let start = self.next_he(incoming_outer); let mut e = start; while let Some(twin) = self[e].twin.as_real_twin() { e = self.next_he(twin); if e == start { panic!("{}", NON_MANIFOLD_EDGE_ERR); } } e }; if self[outgoing_outer].twin.as_next_boundary_he() != Some(incoming_outer) { // Here we need to conceptually delete one fan blade // from the `next` circle around `v` and re-insert it // into the right position. We choose to "move" the fan // blade starting with `incoming_outer` (IB). // // We have to deal with four fan blades: // - IB: the blade containing `incoming_outer` (being // its start). // - OB: the blade containing `outgoing_outer` (being // its end) // - BIB (before incoming blade): the blade before IB // - AOB (after outgoing blade): the blade after OB // (`outgoing_outer.twin<as next>` is its start). // // Current situation: // // ┌────┐ ┌─────┐ ┌─────┐ ┌────┐ // +--> │ OB │ -> │ AOB │ -> ? -> │ BIB │ -> │ IB │ -> ? // | └────┘ └─────┘ └─────┘ └────┘ | // +---------------------------------------------------+ // // Find the end half edge of the blade BIB. let bib_end = self.circulate_around_vertex(vh).find(|&outgoing| { self[outgoing].twin.as_next_boundary_he() == Some(incoming_outer) }).expect("internal DEM bug: couldn't find `bib_end`"); // Here we remove the "incoming blade" from the cycle. // Situation after this assignment: // // ┌────┐ // │ IB │ -------+ // └────┘ | // v // ┌────┐ ┌─────┐ ┌─────┐ // +--> │ OB │ -> │ AOB │ -> ? -> │ BIB │ -----> ? // | └────┘ └─────┘ └─────┘ | // +---------------------------------------------+ // self[bib_end].twin = self[ib_end].twin; // Now we reinsert it again, right after the "outgoing // blade". Situation after assignment: // // // ┌────┐ // │ IB │ ------+ // └────┘ | // v // ┌────┐ ┌─────┐ ┌─────┐ // +--> │ OB │ -> │ AOB │ -> ? -> │ BIB │ -----> ? // | └────┘ └─────┘ └─────┘ | // +---------------------------------------------+ // self[ib_end].twin = self[outgoing_outer].twin; // Right now, the cycle is still a bit broken, but that // doesn't matter, because (a) the cycle will be // repaired by combining IB and OB into one blade // below, and (b) the broken cycle won't be accessed // (in this direction) before it is repaired. // To update `v.outgoing`, we luckily already know a // boundary outgoing half edge of `v`: it's the end of // BIB. self[vh].outgoing = Opt::some(bib_end); } else { // We actually don't need to do a lot here if the fan // blades are in the right order.
The twin handles are // set below this large loop, so we only need to update // the `outgoing` handle of v, as it might have been // invalidated. self[vh].outgoing = Opt::some(ib_end); } } } } // Now we set the twins of the outer half edges. We couldn't before, // because code needed to read the old values. for (&outer, &inner) in outer_hes.iter().zip(&inner_hes) { if let Some(outer) = outer { self[outer].twin = EncodedTwin::twin(inner); self[inner].twin = EncodedTwin::twin(outer); } } inner_hes[0].face() } #[inline(never)] fn reserve_for_vertices(&mut self, count: hsize) { self.vertices.reserve(count); } #[inline(never)] fn reserve_for_faces(&mut self, count: hsize) { // We have three half edges per face self.half_edges.reserve(count * 3); } fn remove_all_vertices(&mut self) { assert!( self.num_faces() == 0, "call to `remove_all_vertices`, but there are faces in the mesh!", ); self.vertices.clear(); } fn remove_all_faces(&mut self) { self.half_edges.clear(); for v in self.vertices.values_mut() { v.outgoing = Opt::none(); } } fn split_face(&mut self, f: FaceHandle) -> VertexHandle { // We need to add: // - 2 new faces (so 6 new half edges) // - 1 new vertex (the "midpoint") // // Let's visualize what we are about to do. On the left is the current // situation, on the right what it looks like after this method is // done. // // (A) | (A) // / ^ | / ^ | ^ // / \ | / | | \ // / \ | / | | \ // / \ | / Z | v Y \ // / \ | / (M) \ // / \ | / ↗⟋ ↖⟍ \ // / X \ | / ⟋⟋ ⟍⟍ \ // / \ | / ⟋⟋ X ⟍⟍ \ // v \ | v ⟋↙ ⟍↘ \ // (B) ----------------> (C) | (B) ----------------> (C) // // Obtain handles of outer half edges and vertices. Two of these half // edges will be repurposed: `a -> b` and `c -> a`. They are changed to // be `m -> b` and `c -> m` instead. let [he_ab_orig, he_bc, he_ca_orig] = self.checked_half_edges_around(f); let vb = self[he_ab_orig].target; let vc = self[he_bc].target; let va = self[he_ca_orig].target; // Find other half edges that point to the edges that will be // repurposed. We need to change their references later. But we need to // obtain these edges already now, as we haven't changed anything about // the mesh yet. let has_ab_as_next = self.circulate_around_vertex(vb) .find(|&outgoing| self[outgoing].twin == EncodedTwin::next_boundary_he(he_ab_orig)); let has_ca_as_next = self.circulate_around_vertex(va) .find(|&outgoing| self[outgoing].twin == EncodedTwin::next_boundary_he(he_ca_orig)); let has_ab_as_twin = self[he_ab_orig].twin.as_real_twin(); let has_ca_as_twin = self[he_ca_orig].twin.as_real_twin(); // Add new vertex "in the middle". The `Checked::new` is correct as the // handle returned by `add_vertex` is obviously valid. let vm = unsafe { Checked::new(self.add_vertex()) }; // Add new half edges for face Y. `unsafe` is fine as we will override // the twins and the handle returned by `push` is obviously valid. let [he_am, he_mc, he_ca] = self.push_half_edge_triple([ unsafe { HalfEdge::dummy_to(vm) }, unsafe { HalfEdge::dummy_to(vc) }, self[he_ca_orig], ]); // Add new half edges for face Z. `unsafe` is fine as we will override // the twins and the handle returned by `push` is obviously valid. let [he_bm, he_ma, he_ab] = self.push_half_edge_triple([ unsafe { HalfEdge::dummy_to(vm) }, unsafe { HalfEdge::dummy_to(va) }, self[he_ab_orig], ]); // Overwrite two half edges of face X, repurposing them as `c -> m` and // `m -> b`. Their twins are overridden below. self[he_ca_orig].target = vm; self[he_ab_orig].target = vb; let he_cm = he_ca_orig; let he_mb = he_ab_orig; // Set the midpoint's `outgoing` handle.
Since the midpoint is not a // boundary vertex, we don't have to pay attention to which `outgoing` // edge we pick. self[vm].outgoing = Opt::some(he_ma); // Set twins of all inner half edges. self.set_twins(he_am, he_ma); self.set_twins(he_bm, he_mb); self.set_twins(he_cm, he_mc); // Fix links of edges that pointed to the repurposed edges. if let Some(he) = has_ab_as_next { self[he].twin = EncodedTwin::next_boundary_he(he_ab); } if let Some(he) = has_ca_as_next { // There is a special case here: the edge that referred to // `he_ca_orig` could be `he_ab_orig`! In that case we do not want // to modify the original edge, but the new one, that's actually // between `va` and `vb`. let he = if he == he_ab_orig { he_ab } else { he }; self[he].twin = EncodedTwin::next_boundary_he(he_ca); } if let Some(he) = has_ab_as_twin { self[he].twin = EncodedTwin::twin(he_ab); } if let Some(he) = has_ca_as_twin { self[he].twin = EncodedTwin::twin(he_ca); } // For two of the outer vertices we have to check if they were // referring to one of the repurposed edges before. If so, we need to // update the `outgoing` handle. if self[va].outgoing == Opt::some(he_ab_orig) { self[va].outgoing = Opt::some(he_ab); } if self[vc].outgoing == Opt::some(he_ca_orig) { self[vc].outgoing = Opt::some(he_ca); } *vm } fn remove_isolated_vertex(&mut self, v: VertexHandle) { // If `outgoing` is `None`, no other element points to `v`, so we can // safely remove it. assert!( self.vertices[v].outgoing.is_none(), "{:?} is not isolated but was passed to `remove_isolated_vertex`", v, ); self.vertices.remove(v); } fn remove_face(&mut self, f: FaceHandle) { let [he0, he1, he2] = self.checked_half_edges_around(f); // We iterate over the three corners of `f` and look at each corner // individually. We have the half edges of `f` as shown here: // // ? // ? ? // (v) // / ^ // outgoing / \ incoming // / F \ // v \ // ( ) ----> ( ) // opposite_of_v // let corners = [[he0, he1, he2], [he1, he2, he0], [he2, he0, he1]]; for &[incoming, outgoing, opposite_of_v] in &corners { let v = self[incoming].target; // If we have a face on the left, we always have to set a new // `twin` value for the twin of `outgoing`. // // ? ? // // <-------- (v) // ^/ ^ // some face // \ ? // // \ // // F \ // /v \ // ( ) ( ) // if let Twin::Twin(incoming_outer) = self[outgoing].twin.decode() { // We need to change the `twin` handle of `incoming_outer` as // `outgoing` will be removed. We have basically two // possibilities: `opposite_of_v` has a real twin (left) or not // (right). // // x --- v | x --- v // \ / \ ? | \ / \ ? // ? \ / f \ | ? \ / f \ // x --- x | x --- x // \ / | ? / // ? \ / ? | / // x | x // // Either way, `opposite_of_v.twin` is what // `incoming_outer.twin` needs to point to. self[incoming_outer].twin = self[opposite_of_v].twin.to_next_boundary_he(); } // We handle the rest on a per-case basis match (self[outgoing].twin.decode(), self[incoming].twin.decode()) { // // (v) // / ^ // / \ // / F \ // v \ // ( ) ( ) // (Twin::NextBoundaryHe(next), Twin::NextBoundaryHe(_)) if next == incoming => { self[v].outgoing = Opt::none(); } // ^ // ? ? / | \ ? ? ^ // ? / end | \ / // / | start \ ? / end // / | v / // <-------- (v) | (v) // ^/ ^ | / ^ // some face // \ | / \ // // \ | / F \ // // F \ | v \ // /v \ | ( ) ( ) // ( ) ( ) // (_, Twin::NextBoundaryHe(_)) => { // Obtain `end`, the half edge ending the blade before our // blade.
let end = CwVertexCirculator::new(self, outgoing).find(|&outgoing| { self[outgoing].twin.as_next_boundary_he() == Some(incoming) }).expect("DEM bug: invalid cycle around vertex"); self[end].twin = self[outgoing].twin.to_next_boundary_he(); // We need to overwrite `v.outgoing` if it is currently // pointing to `outgoing` (which can only happen if // `outgoing` is a boundary edge). TODO: this can only // happen if we allow multi-blade vertices. if self[v].outgoing == Opt::some(outgoing) { self[v].outgoing = Opt::some(end); } } // ? ? // // (v)<--------- // ? / ^\ // / \\ some face // / \\ // / F \\ // v \v // ( ) ( ) // (_, Twin::Twin(outgoing_outer)) => { // `outgoing` could have a real twin or not. But in either // case, the half edge stored in `outgoing.twin` will be // the next boundary half edge of `outgoing_outer`. self[outgoing_outer].twin = self[outgoing].twin.to_next_boundary_he(); // We just overwrite `v.outgoing` as it might have pointed // to `outgoing`. self[v].outgoing = Opt::some(outgoing_outer); } } } // Actually remove the half edges from the vector. self.half_edges.remove(*he0); self.half_edges.remove(*he1); self.half_edges.remove(*he2); } fn add_face(&mut self, _: &[VertexHandle]) -> FaceHandle where Self: PolyMesh { unreachable!() } fn flip_edge(&mut self, _: EdgeHandle) where Self: EdgeMesh + TriMesh { unreachable!() } fn split_edge_with_faces(&mut self, _: EdgeHandle) -> SplitEdgeWithFacesResult where Self: EdgeMesh + TriMesh { unreachable!() } } impl<C: Config> SupportsMultiBlade for DirectedEdgeMesh<C> {} // TODO: Think about `EdgeMesh`. // // Exposing edges is pretty tricky for this data structure. First idea: use the // lower half edge handle as edge handle. This works fine for the most part, // but can go boom when the mesh is mutated: // - `delete_face`: the lower half edge might get deleted, while the upper // remains (the full edge also logically remains in the mesh). // - `flip_edge`, ...: if edge pairs are changed, that's gonna be problematic. // // Several ideas to work around the problems: // - Be fairly restrictive about the validity of edge handles (i.e. if a face // around an edge changes, the edge handle might be invalidated). Not sure if // that's a viable solution. Maybe people need those handles to remain valid? // - Assuming a `stable-vec` implementation that retains removed elements, one // could store some data inside deleted elements. For half edges, we could // store the twin, even if that half edge is deleted. That way, an edge // handle pointing to a removed half edge could still access the other half. // // Also, a notable disadvantage is that edge handles are not really consecutive // this way. One can assume that the two halves of an edge are approximately // created at the same time. Meaning that the highest edge handle has an index // approximately twice the number of edges. This makes storing edge attributes // in arrays rather inefficient and wasteful. // // One completely different approach is to store additional data. For example, // a simple edge array which points to its two half edges. This would be fairly // memory inefficient though. Maybe there is some smart way to encode this? // This could of course be configured via `Config`.
39.729195
99
0.461017
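// A minimal standalone sketch (separate from the file above) of the
// divisibility trick used by `is_divisible_by_3`: 0xaaaa_aaab is the
// multiplicative inverse of 3 modulo 2^32, so multiplying by it maps exactly
// the multiples of 3 into the low third of the `u32` range.
fn is_div3(n: u32) -> bool {
    n.wrapping_mul(0xaaaa_aaab) <= 0x5555_5555
}

#[test]
fn div3_agrees_with_modulo() {
    // Brute-force comparison against the straightforward check.
    for n in 0..1_000_000u32 {
        assert_eq!(is_div3(n), n % 3 == 0);
    }
}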
5028af1756fb4bae3d204572335596b626e61da9
4,165
use libc::*; use linux; #[cfg(not(test))] mod not_test { // Syscall number is passed in %eax, syscall arguments in %ebx, %ecx, %edx, // %esi, %edi. The arguments are // (flags: c_ulong, // %ebx // child_stack: *mut c_void, // %ecx // ptid: *mut c_int, // %edx // newtls: c_ulong, // %esi // ctid: *mut c_int) // %edi // // No registers are clobbered, %eax gets the return value. // // Only %eax, %ecx and %edx are caller-saved, so we must restore the value // of all other registers before returning. // // The cdecl calling convention passes arguments on the stack, right to // left. Since we push %ebp onto the stack in the very beginning, all // offsets are increased by 4. The arguments are // (fn_: extern "C" fn(*mut c_void) -> *mut c_void, // 8(%ebp) // child_stack: *mut c_void, // 12(%ebp) // flags: c_ulong, // 16(%ebp) // arg: *mut c_void, // 20(%ebp) // ptid: *mut pid_t, // 24(%ebp) // newtls: *mut c_void, // 28(%ebp) // ctid: *mut pid_t) // 32(%ebp) // // Both ABIs return the function result in %eax. // // This means we need the following moves: // 16(%ebp) -> %ebx // flags // 12(%ebp) -> %ecx // child_stack // 24(%ebp) -> %edx // ptid // fancy -> %esi // newtls // 32(%ebp) -> %edi // ctid // // We need to create a struct of type `struct user_desc` (see `clone(2)` // and `set_thread_area(2)`) and store it in %esi. We do it by pushing it // onto the parent stack. // // We save `fn_` in %ebp. global_asm!(" .globl __steed_clone __steed_clone: # Stack frame push %ebp mov %esp,%ebp # Save registers push %ebx push %esi push %edi mov 12(%ebp),%ecx # child_stack and $-16,%ecx # Align the stack # Push the parameter sub $16,%ecx # Keep the stack aligned mov 20(%ebp),%edi # arg mov %edi,(%ecx) # Construct a struct of type `user_desc` # Bitfield, according to glibc: # seg_32bit:1 = 1 # contents:2 = 0 # read_exec_only:1 = 0 # limit_in_pages:1 = 1 # seg_not_present:1 = 0 # useable:1 = 1 push $0x51 push $0xfffff # limit push 28(%ebp) # base_addr xor %eax,%eax mov %gs,%ax shr $3,%eax push %eax # entry_number mov $120,%eax # CLONE mov 16(%ebp),%ebx # flags mov 24(%ebp),%edx # ptid mov %esp,%esi # newtls mov 32(%ebp),%edi # ctid mov 8(%ebp),%ebp # fn_ int $0x80 # CLONE returns 0 in the child thread, return if we're the parent. test %eax,%eax jnz 1f mov %ebp,%eax # fn_ # Mark the lowest stack frame xor %ebp,%ebp # arg is already on the stack call *%eax mov %eax,%ebx # status mov $1,%eax # EXIT int $0x80 # Unreachable hlt 1: # Pop the struct add $16,%esp # Restore registers pop %edi pop %esi pop %ebx # Stack frame pop %ebp ret "); } pub unsafe fn set_thread_pointer(thread_data: *mut ()) { let mut user_desc = linux::user_desc { entry_number: -1i32 as u32, base_addr: thread_data as u32, limit: 0xfffff, // This `flags` value is explained in the `asm!` block of // `__steed_clone` above. flags: 0x51, }; let result = linux::set_thread_area(&mut user_desc); if result < 0 { panic!("set_thread_pointer: set_thread_area: {}", result); } asm!("mov $0,%gs"::"r"(((user_desc.entry_number << 3) | 3) as u16)::"volatile"); } #[inline(always)] pub unsafe fn thread_self() -> *mut thread { let result; asm!("mov %gs:0,$0":"=r"(result)); result }
27.582781
84
0.50012
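// A minimal sketch of the selector arithmetic used by `set_thread_pointer`
// above: an x86 segment selector is (GDT index << 3) | table indicator
// (0 = GDT) | requested privilege level (3 = user mode), and the `user_desc`
// flags value 0x51 sets seg_32bit (bit 0), limit_in_pages (bit 4) and
// useable (bit 6).
fn gdt_selector(entry_number: u32) -> u16 {
    ((entry_number << 3) | 3) as u16
}

fn main() {
    let flags = 1 | (1 << 4) | (1 << 6); // seg_32bit | limit_in_pages | useable
    assert_eq!(flags, 0x51);
    // E.g. if the kernel picked GDT entry 6, %gs is loaded with 0x33.
    assert_eq!(gdt_selector(6), 0x33);
}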
ed430f79ff411cd056c0000a2287338bf260ba2d
5,624
// Copyright 2020 Arm Limited (or its affiliates). All rights reserved. // Copyright © 2020, Oracle and/or its affiliates. // // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //! Implements platform specific functionality. //! Supported platforms: x86_64, aarch64. #[macro_use] extern crate log; #[macro_use] extern crate serde_derive; #[cfg(target_arch = "x86_64")] use crate::x86_64::SgxEpcSection; use std::collections::BTreeMap; use std::fmt; use std::result; use std::sync::Arc; use versionize::{VersionMap, Versionize, VersionizeError, VersionizeResult}; use versionize_derive::Versionize; use vm_migration::VersionMapped; type GuestMemoryMmap = vm_memory::GuestMemoryMmap<vm_memory::bitmap::AtomicBitmap>; type GuestRegionMmap = vm_memory::GuestRegionMmap<vm_memory::bitmap::AtomicBitmap>; /// Type for returning error code. #[derive(Debug)] pub enum Error { #[cfg(target_arch = "x86_64")] /// X86_64 specific error triggered during system configuration. X86_64Setup(x86_64::Error), #[cfg(target_arch = "aarch64")] /// AArch64 specific error triggered during system configuration. AArch64Setup(aarch64::Error), /// The zero page extends past the end of guest_mem. ZeroPagePastRamEnd, /// Error writing the zero page of guest memory. ZeroPageSetup(vm_memory::GuestMemoryError), /// The memory map table extends past the end of guest memory. MemmapTablePastRamEnd, /// Error writing memory map table to guest memory. MemmapTableSetup, /// The hvm_start_info structure extends past the end of guest memory. StartInfoPastRamEnd, /// Error writing hvm_start_info to guest memory. StartInfoSetup, /// Failed to compute initramfs address. InitramfsAddress, /// Error writing module entry to guest memory. ModlistSetup(vm_memory::GuestMemoryError), /// RSDP Beyond Guest Memory RsdpPastRamEnd, } /// Type for returning public functions outcome. pub type Result<T> = result::Result<T, Error>; /// Type for memory region types. #[derive(Clone, Copy, PartialEq, Debug, Serialize, Deserialize, Versionize)] pub enum RegionType { /// RAM type Ram, /// SubRegion memory region. /// A SubRegion is a memory region sub-region, allowing for a region /// to be split into sub regions managed separately. /// For example, the x86 32-bit memory hole is a SubRegion. SubRegion, /// Reserved type. /// A Reserved memory region is one that should not be used for memory /// allocation. This type can be used to prevent the VMM from allocating /// memory ranges in a specific address range. Reserved, } impl VersionMapped for RegionType {} /// Module for aarch64 related functionality. #[cfg(target_arch = "aarch64")] pub mod aarch64; #[cfg(target_arch = "aarch64")] pub use aarch64::{ arch_memory_regions, configure_system, configure_vcpu, fdt::DeviceInfoForFdt, get_host_cpu_phys_bits, get_kernel_start, get_uefi_start, initramfs_load_addr, layout, layout::CMDLINE_MAX_SIZE, layout::IRQ_BASE, uefi, EntryPoint, }; #[cfg(target_arch = "x86_64")] pub mod x86_64; #[cfg(target_arch = "x86_64")] pub use x86_64::{ arch_memory_regions, configure_system, configure_vcpu, generate_common_cpuid, get_host_cpu_phys_bits, initramfs_load_addr, layout, layout::CMDLINE_MAX_SIZE, layout::CMDLINE_START, regs, CpuidFeatureEntry, EntryPoint, }; /// Safe wrapper for `sysconf(_SC_PAGESIZE)`. 
#[cfg(target_arch = "x86_64")] #[inline(always)] fn pagesize() -> usize { // Trivially safe unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize } } #[derive(Clone, Default)] pub struct NumaNode { pub memory_regions: Vec<Arc<GuestRegionMmap>>, pub hotplug_regions: Vec<Arc<GuestRegionMmap>>, pub cpus: Vec<u8>, pub distances: BTreeMap<u32, u8>, pub memory_zones: Vec<String>, #[cfg(target_arch = "x86_64")] pub sgx_epc_sections: Vec<SgxEpcSection>, } pub type NumaNodes = BTreeMap<u32, NumaNode>; /// Type for passing information about the initramfs in the guest memory. pub struct InitramfsConfig { /// Load address of initramfs in guest memory pub address: vm_memory::GuestAddress, /// Size of initramfs in guest memory pub size: usize, } /// Types of devices that can get attached to this platform. #[derive(Clone, Debug, PartialEq, Eq, Hash, Copy)] pub enum DeviceType { /// Device Type: Virtio. Virtio(u32), /// Device Type: Serial. #[cfg(target_arch = "aarch64")] Serial, /// Device Type: RTC. #[cfg(target_arch = "aarch64")] Rtc, /// Device Type: GPIO. #[cfg(target_arch = "aarch64")] Gpio, } /// Default (smallest) memory page size for the supported architectures. pub const PAGE_SIZE: usize = 4096; impl fmt::Display for DeviceType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", self) } } /// Structure to describe MMIO device information #[derive(Clone, Debug)] #[cfg(target_arch = "aarch64")] pub struct MmioDeviceInfo { pub addr: u64, pub irq: u32, } /// Structure to describe PCI space information #[derive(Clone, Debug)] #[cfg(target_arch = "aarch64")] pub struct PciSpaceInfo { pub pci_segment_id: u16, pub mmio_config_address: u64, pub pci_device_space_start: u64, pub pci_device_space_size: u64, } #[cfg(target_arch = "aarch64")] impl DeviceInfoForFdt for MmioDeviceInfo { fn addr(&self) -> u64 { self.addr } fn irq(&self) -> u32 { self.irq } fn length(&self) -> u64 { 4096 } }
30.236559
90
0.703058
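// A minimal sketch of how a page-size constant like `PAGE_SIZE` above is
// typically used: rounding guest memory sizes up to a page boundary. The bit
// trick requires the page size to be a power of two.
const PAGE_SIZE: usize = 4096;

fn align_up_to_page(len: usize) -> usize {
    (len + PAGE_SIZE - 1) & !(PAGE_SIZE - 1)
}

fn main() {
    assert_eq!(align_up_to_page(1), 4096);
    assert_eq!(align_up_to_page(4096), 4096);
    assert_eq!(align_up_to_page(4097), 8192);
}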
bb5a5f04ae167f8a6bf1e059e87e9a56b6dfd9d4
404
//! Tests auto-converted from "sass-spec/spec/non_conformant/extend-tests/161_test_combinator_unification_double_plus.hrx" #[test] #[ignore] // unexpected error fn test() { assert_eq!( crate::rsass( ".a + x {a: b}\ \n.a.b + y {@extend x}\ \n" ) .unwrap(), ".a + x, .a.b + y {\ \n a: b;\ \n}\ \n" ); }
21.263158
122
0.465347
28978b4c9030013c0764993e885575e8ea945e0c
3,861
// Copyright (C) 2019, Cloudflare, Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use crate::octets; use super::Error; use super::Result; use self::table::DECODE_TABLE; use self::table::ENCODE_TABLE; pub fn decode(b: &mut octets::Octets) -> Result<Vec<u8>> { // Max compression ratio is >= 0.5 let mut out = Vec::with_capacity(b.len() << 1); let mut decoder = Decoder::new(); while b.cap() > 0 { let byte = b.get_u8()?; if let Some(b) = decoder.decode4(byte >> 4)? { out.push(b); } if let Some(b) = decoder.decode4(byte & 0xf)? { out.push(b); } } if !decoder.is_final() { return Err(Error::InvalidHuffmanEncoding); } Ok(out) } pub fn encode(src: &[u8], out: &mut octets::Octets) -> Result<()> { let mut bits: u64 = 0; let mut bits_left = 40; for &b in src { let (nbits, code) = ENCODE_TABLE[b as usize]; bits |= code << (bits_left - nbits); bits_left -= nbits; while bits_left <= 32 { out.put_u8((bits >> 32) as u8)?; bits <<= 8; bits_left += 8; } } if bits_left != 40 { // This writes the EOS token bits |= (1 << bits_left) - 1; out.put_u8((bits >> 32) as u8)?; } Ok(()) } pub fn encode_output_length(src: &[u8]) -> Result<usize> { let mut bits: usize = 0; for &b in src { let (nbits, _) = ENCODE_TABLE[b as usize]; bits += nbits; } let mut len = bits / 8; if bits & 7 != 0 { len += 1; } Ok(len) } struct Decoder { state: usize, maybe_eos: bool, } impl Decoder { fn new() -> Decoder { Decoder { state: 0, maybe_eos: false, } } // Decodes 4 bits fn decode4(&mut self, input: u8) -> Result<Option<u8>> { const MAYBE_EOS: u8 = 1; const DECODED: u8 = 2; const ERROR: u8 = 4; // (next-state, byte, flags) let (next, byte, flags) = DECODE_TABLE[self.state][input as usize]; if flags & ERROR == ERROR { // Data followed the EOS marker return Err(Error::InvalidHuffmanEncoding); } let ret = if flags & DECODED == DECODED { Some(byte) } else { None }; self.state = next; self.maybe_eos = flags & MAYBE_EOS == MAYBE_EOS; Ok(ret) } fn is_final(&self) -> bool { self.state == 0 || self.maybe_eos } } mod table;
25.74
79
0.592593
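// A minimal standalone sketch of the bit accumulator used by `encode` above:
// codes are ORed in MSB-first below the bits already written, whole bytes are
// flushed from bits[39:32] of the u64 window, and a final partial byte is
// padded with the all-ones EOS prefix.
fn pack(codes: &[(u32, u64)]) -> Vec<u8> {
    let mut out = Vec::new();
    let mut bits: u64 = 0;
    let mut bits_left: u32 = 40; // 40-bit window; the next output byte is bits[39:32]
    for &(nbits, code) in codes {
        bits |= code << (bits_left - nbits);
        bits_left -= nbits;
        while bits_left <= 32 {
            out.push((bits >> 32) as u8);
            bits <<= 8;
            bits_left += 8;
        }
    }
    if bits_left != 40 {
        bits |= (1u64 << bits_left) - 1; // EOS padding
        out.push((bits >> 32) as u8);
    }
    out
}

fn main() {
    // Two 4-bit codes 0b1010 and 0b0101 pack into the single byte 0xa5.
    assert_eq!(pack(&[(4, 0b1010), (4, 0b0101)]), vec![0xa5]);
    // A single 5-bit code 0b00000 is padded with three 1 bits: 0b0000_0111.
    assert_eq!(pack(&[(5, 0b00000)]), vec![0x07]);
}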
fc7e9807e5f809cbb54fb8d09671db3900cc866f
5,856
//! Penetration depth computation algorithm approximating the Minkowski sum. use num::{Bounded, Zero}; use na::{self, RealField, Unit}; use crate::shape::{AnnotatedPoint, MinkowskiSum, Reflection}; use crate::shape::SupportMap; use crate::query::algorithms::gjk; use crate::query::algorithms::simplex::Simplex; use crate::math::{Isometry, Point, Vector}; /// Computes the closest points between two implicit inter-penetrating shapes. Returns None if the /// shapes are not in penetration. This can be used as a fallback algorithm for the GJK algorithm. pub fn closest_points<N, S, G1: ?Sized, G2: ?Sized>( m1: &Isometry<N>, g1: &G1, m2: &Isometry<N>, g2: &G2, simplex: &mut S, ) -> Option<(Point<N>, Point<N>, Unit<Vector<N>>)> where N: RealField + Copy, S: Simplex<AnnotatedPoint<N>>, G1: SupportMap<N>, G2: SupportMap<N>, { let reflect2 = Reflection::new(g2); let cso = MinkowskiSum::new(m1, g1, m2, &reflect2); // find an approximation of the smallest penetration direction let mut best_dir: Vector<N> = na::zero(); let mut min_dist = Bounded::max_value(); Vector::<N>::sample_sphere(|sample: Vector<N>| { let support = cso.local_support_point(&sample); let distance = sample.dot(&support.coords); if distance < min_dist { best_dir = sample; min_dist = distance; } }); let extra_shift = na::convert(0.01f64); // FIXME: do not hard-code the extra shift? let shift = best_dir * (min_dist + extra_shift); // Translate the second shape by `shift` in world space before running GJK. let tm2 = na::Translation::from(shift) * m2; simplex.modify_pnts(&|pt| pt.translate_2(&(-shift))); match gjk::closest_points(m1, g1, &tm2, g2, simplex) { None => None, // panic!("Internal error: the origin was inside of the Simplex during phase 1."), Some((p1, p2)) => { // NOTE: at this point, p1 must *not* be considered as a good contact point for the // first object. For example: // // // +-------------+ // | | // | obj2 | // +-------|-----+ | // | +-----+-------+ // | obj1 | // | | // +-------------+ // // May become after shifting: // +-------------+ // | | // | obj2 | // | | // p2 -> x-------------+ // +-------------x <- p1 // | | // | obj1 | // | | // +-------------+ // // Thus, after un-shifting, p1 becomes clearly invalid: // // +-------------+ // | | // | obj2 | // +-------|-----+ <- p1 | // | p2 -> +-----+-------+ // | obj1 | // | | // +-------------+ let (normal, dist_err) = Unit::new_and_get(p2 - p1); if !dist_err.is_zero() { let p2 = p2 + (-shift); let center = na::center(&p1, &p2); let nmin_dist = normal.dot(&best_dir) * (min_dist + extra_shift); let p2 = center + (-*normal) * (nmin_dist - dist_err); Some((center, p2, normal)) } else { // FIXME: something went wrong here. None } } } } /// Projects the origin on a support-mapped shape. /// /// The origin is assumed to be inside of the shape. pub fn project_origin<N, S, G>(m: &Isometry<N>, g: &G, simplex: &mut S) -> Option<Point<N>> where N: RealField + Copy, S: Simplex<N>, G: SupportMap<N>, { // find an approximation of the smallest penetration direction let mut best_dir: Vector<N> = na::zero(); let mut min_dist = Bounded::max_value(); Vector::<N>::sample_sphere(|sample: Vector<N>| { let support = g.support_point(m, &sample); let distance = sample.dot(&support.coords); if distance < min_dist { best_dir = sample; min_dist = distance; } }); let extra_shift = na::convert(0.01f64); // FIXME: do not hard-code the extra shift?
let shift = best_dir * (min_dist + extra_shift); // Translate the shape by `-shift` in world space before projecting. let tm = na::Translation::from(-shift) * m; simplex.modify_pnts(&|pt| *pt = *pt + (-shift)); match gjk::project_origin(&tm, g, simplex) { None => None, // panic!("Internal error: the origin was inside of the Simplex during phase 1."), Some(p) => { let mut normal = -p.coords; let dist_err = normal.normalize_mut(); if !dist_err.is_zero() { let nmin_dist = normal.dot(&best_dir) * (min_dist + extra_shift); Some(Point::origin() + normal * (nmin_dist - dist_err)) } else { // FIXME: something went wrong here. None } } } }
37.299363
104
0.433402
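// A plain-Rust sketch of the direction sampling in `closest_points` above,
// with hypothetical `Vec3`/`support` stand-ins (not the ncollide API): among
// a fixed set of unit directions, keep the one whose support point lies
// closest along that direction -- an approximation of the minimum
// penetration direction.
#[derive(Clone, Copy)]
struct Vec3 { x: f64, y: f64, z: f64 }

fn dot(a: Vec3, b: Vec3) -> f64 { a.x * b.x + a.y * b.y + a.z * b.z }

// Support function of an axis-aligned box centered at the origin.
fn support(half_extents: Vec3, dir: Vec3) -> Vec3 {
    Vec3 {
        x: half_extents.x.copysign(dir.x),
        y: half_extents.y.copysign(dir.y),
        z: half_extents.z.copysign(dir.z),
    }
}

fn approx_min_penetration_dir(half_extents: Vec3, samples: &[Vec3]) -> (Vec3, f64) {
    let mut best = (samples[0], f64::INFINITY);
    for &s in samples {
        let dist = dot(s, support(half_extents, s));
        if dist < best.1 {
            best = (s, dist);
        }
    }
    best
}

fn main() {
    let axes = [
        Vec3 { x: 1.0, y: 0.0, z: 0.0 },
        Vec3 { x: 0.0, y: 1.0, z: 0.0 },
        Vec3 { x: 0.0, y: 0.0, z: 1.0 },
    ];
    // For a 2x4x6 box the smallest support distance is along x.
    let (dir, dist) = approx_min_penetration_dir(Vec3 { x: 1.0, y: 2.0, z: 3.0 }, &axes);
    assert_eq!(dist, 1.0);
    assert_eq!(dir.x, 1.0);
}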
213ae890d7d096c7eee8389806d728b942a72354
134,871
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //! SQL Query Planner (produces logical plan from SQL AST) use std::collections::HashSet; use std::str::FromStr; use std::sync::Arc; use std::{convert::TryInto, vec}; use crate::catalog::TableReference; use crate::datasource::TableProvider; use crate::logical_plan::window_frames::{WindowFrame, WindowFrameUnits}; use crate::logical_plan::Expr::Alias; use crate::logical_plan::{ and, lit, union_with_alias, Column, DFSchema, Expr, LogicalPlan, LogicalPlanBuilder, Operator, PlanType, StringifiedPlan, ToDFSchema, }; use crate::prelude::JoinType; use crate::scalar::ScalarValue; use crate::{ error::{DataFusionError, Result}, physical_plan::udaf::AggregateUDF, }; use crate::{ physical_plan::udf::ScalarUDF, physical_plan::{aggregates, functions, window_functions}, sql::parser::{CreateExternalTable, FileType, Statement as DFStatement}, }; use arrow::datatypes::*; use hashbrown::HashMap; use sqlparser::ast::{ BinaryOperator, DataType as SQLDataType, DateTimeField, Expr as SQLExpr, FunctionArg, Ident, Join, JoinConstraint, JoinOperator, ObjectName, Query, Select, SelectItem, SetExpr, SetOperator, ShowStatementFilter, TableFactor, TableWithJoins, UnaryOperator, Value, }; use sqlparser::ast::{ColumnDef as SQLColumnDef, ColumnOption}; use sqlparser::ast::{OrderByExpr, Statement}; use sqlparser::parser::ParserError::ParserError; use super::{ parser::DFParser, utils::{ can_columns_satisfy_exprs, expand_wildcard, expr_as_column_expr, extract_aliases, find_aggregate_exprs, find_column_exprs, find_window_exprs, group_window_expr_by_sort_keys, rebase_expr, resolve_aliases_to_exprs, resolve_positions_to_exprs, }, }; /// The ContextProvider trait allows the query planner to obtain meta-data about tables and /// functions referenced in SQL statements pub trait ContextProvider { /// Getter for a datasource fn get_table_provider(&self, name: TableReference) -> Option<Arc<dyn TableProvider>>; /// Getter for a UDF description fn get_function_meta(&self, name: &str) -> Option<Arc<ScalarUDF>>; /// Getter for a UDAF description fn get_aggregate_meta(&self, name: &str) -> Option<Arc<AggregateUDF>>; } /// SQL query planner pub struct SqlToRel<'a, S: ContextProvider> { schema_provider: &'a S, } impl<'a, S: ContextProvider> SqlToRel<'a, S> { /// Create a new query planner pub fn new(schema_provider: &'a S) -> Self { SqlToRel { schema_provider } } /// Generate a logical plan from an DataFusion SQL statement pub fn statement_to_plan(&self, statement: &DFStatement) -> Result<LogicalPlan> { match statement { DFStatement::CreateExternalTable(s) => self.external_table_to_plan(s), DFStatement::Statement(s) => self.sql_statement_to_plan(s), } } /// Generate a logical plan from an SQL statement pub fn sql_statement_to_plan(&self, sql: &Statement) -> 
Result<LogicalPlan> { match sql { Statement::Explain { verbose, statement, analyze: _, } => self.explain_statement_to_plan(*verbose, statement), Statement::Query(query) => self.query_to_plan(query), Statement::ShowVariable { variable } => self.show_variable_to_plan(variable), Statement::ShowColumns { extended, full, table_name, filter, } => self.show_columns_to_plan(*extended, *full, table_name, filter.as_ref()), _ => Err(DataFusionError::NotImplemented( "Only SELECT statements are implemented".to_string(), )), } } /// Generate a logic plan from an SQL query pub fn query_to_plan(&self, query: &Query) -> Result<LogicalPlan> { self.query_to_plan_with_alias(query, None, &mut HashMap::new()) } /// Generate a logic plan from an SQL query with optional alias pub fn query_to_plan_with_alias( &self, query: &Query, alias: Option<String>, ctes: &mut HashMap<String, LogicalPlan>, ) -> Result<LogicalPlan> { let set_expr = &query.body; if let Some(with) = &query.with { // Process CTEs from top to bottom // do not allow self-references for cte in &with.cte_tables { // create logical plan & pass backreferencing CTEs let logical_plan = self.query_to_plan_with_alias( &cte.query, Some(cte.alias.name.value.clone()), &mut ctes.clone(), )?; ctes.insert(cte.alias.name.value.clone(), logical_plan); } } let plan = self.set_expr_to_plan(set_expr, alias, ctes)?; let plan = self.order_by(plan, &query.order_by)?; self.limit(plan, &query.limit) } fn set_expr_to_plan( &self, set_expr: &SetExpr, alias: Option<String>, ctes: &mut HashMap<String, LogicalPlan>, ) -> Result<LogicalPlan> { match set_expr { SetExpr::Select(s) => self.select_to_plan(s.as_ref(), ctes), SetExpr::SetOperation { op, left, right, all, } => match (op, all) { (SetOperator::Union, true) => { let left_plan = self.set_expr_to_plan(left.as_ref(), None, ctes)?; let right_plan = self.set_expr_to_plan(right.as_ref(), None, ctes)?; union_with_alias(left_plan, right_plan, alias) } _ => Err(DataFusionError::NotImplemented(format!( "Only UNION ALL is supported, found {}", op ))), }, _ => Err(DataFusionError::NotImplemented(format!( "Query {} not implemented yet", set_expr ))), } } /// Generate a logical plan from a CREATE EXTERNAL TABLE statement pub fn external_table_to_plan( &self, statement: &CreateExternalTable, ) -> Result<LogicalPlan> { let CreateExternalTable { name, columns, file_type, has_header, location, } = statement; // semantic checks match *file_type { FileType::CSV => { if columns.is_empty() { return Err(DataFusionError::Plan( "Column definitions required for CSV files. None found".into(), )); } } FileType::Parquet => { if !columns.is_empty() { return Err(DataFusionError::Plan( "Column definitions can not be specified for PARQUET files." .into(), )); } } FileType::NdJson => {} }; let schema = self.build_schema(columns)?; Ok(LogicalPlan::CreateExternalTable { schema: schema.to_dfschema_ref()?, name: name.clone(), location: location.clone(), file_type: *file_type, has_header: *has_header, }) } /// Generate a plan for EXPLAIN ... 
that will print out a plan /// pub fn explain_statement_to_plan( &self, verbose: bool, statement: &Statement, ) -> Result<LogicalPlan> { let plan = self.sql_statement_to_plan(statement)?; let stringified_plans = vec![StringifiedPlan::new( PlanType::LogicalPlan, format!("{:#?}", plan), )]; let schema = LogicalPlan::explain_schema(); let plan = Arc::new(plan); Ok(LogicalPlan::Explain { verbose, plan, stringified_plans, schema: schema.to_dfschema_ref()?, }) } fn build_schema(&self, columns: &[SQLColumnDef]) -> Result<Schema> { let mut fields = Vec::new(); for column in columns { let data_type = self.make_data_type(&column.data_type)?; let allow_null = column .options .iter() .any(|x| x.option == ColumnOption::Null); fields.push(Field::new(&column.name.value, data_type, allow_null)); } Ok(Schema::new(fields)) } /// Maps the SQL type to the corresponding Arrow `DataType` fn make_data_type(&self, sql_type: &SQLDataType) -> Result<DataType> { match sql_type { SQLDataType::BigInt => Ok(DataType::Int64), SQLDataType::Int => Ok(DataType::Int32), SQLDataType::SmallInt => Ok(DataType::Int16), SQLDataType::Char(_) | SQLDataType::Varchar(_) | SQLDataType::Text => { Ok(DataType::Utf8) } SQLDataType::Decimal(_, _) => Ok(DataType::Float64), SQLDataType::Float(_) => Ok(DataType::Float32), SQLDataType::Real | SQLDataType::Double => Ok(DataType::Float64), SQLDataType::Boolean => Ok(DataType::Boolean), SQLDataType::Date => Ok(DataType::Date32), SQLDataType::Time => Ok(DataType::Time64(TimeUnit::Millisecond)), SQLDataType::Timestamp => Ok(DataType::Timestamp(TimeUnit::Nanosecond, None)), _ => Err(DataFusionError::NotImplemented(format!( "The SQL data type {:?} is not implemented", sql_type ))), } } fn plan_from_tables( &self, from: &[TableWithJoins], ctes: &mut HashMap<String, LogicalPlan>, ) -> Result<Vec<LogicalPlan>> { match from.len() { 0 => Ok(vec![LogicalPlanBuilder::empty(true).build()?]), _ => from .iter() .map(|t| self.plan_table_with_joins(t, ctes)) .collect::<Result<Vec<_>>>(), } } fn plan_table_with_joins( &self, t: &TableWithJoins, ctes: &mut HashMap<String, LogicalPlan>, ) -> Result<LogicalPlan> { let left = self.create_relation(&t.relation, ctes)?; match t.joins.len() { 0 => Ok(left), n => { let mut left = self.parse_relation_join(left, &t.joins[0], ctes)?; for i in 1..n { left = self.parse_relation_join(left, &t.joins[i], ctes)?; } Ok(left) } } } fn parse_relation_join( &self, left: LogicalPlan, join: &Join, ctes: &mut HashMap<String, LogicalPlan>, ) -> Result<LogicalPlan> { let right = self.create_relation(&join.relation, ctes)?; match &join.join_operator { JoinOperator::LeftOuter(constraint) => { self.parse_join(left, &right, constraint, JoinType::Left) } JoinOperator::RightOuter(constraint) => { self.parse_join(left, &right, constraint, JoinType::Right) } JoinOperator::Inner(constraint) => { self.parse_join(left, &right, constraint, JoinType::Inner) } JoinOperator::FullOuter(constraint) => { self.parse_join(left, &right, constraint, JoinType::Full) } JoinOperator::CrossJoin => self.parse_cross_join(left, &right), other => Err(DataFusionError::NotImplemented(format!( "Unsupported JOIN operator {:?}", other ))), } } fn parse_cross_join( &self, left: LogicalPlan, right: &LogicalPlan, ) -> Result<LogicalPlan> { LogicalPlanBuilder::from(left).cross_join(right)?.build() } fn parse_join( &self, left: LogicalPlan, right: &LogicalPlan, constraint: &JoinConstraint, join_type: JoinType, ) -> Result<LogicalPlan> { match constraint { JoinConstraint::On(sql_expr) => { let mut keys: Vec<(Column, Column)> = 
vec![]; let join_schema = left.schema().join(right.schema())?; // parse ON expression let expr = self.sql_to_rex(sql_expr, &join_schema)?; // extract join keys extract_join_keys(&expr, &mut keys)?; let (left_keys, right_keys): (Vec<Column>, Vec<Column>) = keys.into_iter().unzip(); // return the logical plan representing the join LogicalPlanBuilder::from(left) .join(right, join_type, left_keys, right_keys)? .build() } JoinConstraint::Using(idents) => { let keys: Vec<Column> = idents .iter() .map(|x| Column::from_name(x.value.clone())) .collect(); LogicalPlanBuilder::from(left) .join_using(right, join_type, keys)? .build() } JoinConstraint::Natural => { // https://issues.apache.org/jira/browse/ARROW-10727 Err(DataFusionError::NotImplemented( "NATURAL JOIN is not supported (https://issues.apache.org/jira/browse/ARROW-10727)".to_string(), )) } JoinConstraint::None => Err(DataFusionError::NotImplemented( "NONE constraint is not supported".to_string(), )), } } fn create_relation( &self, relation: &TableFactor, ctes: &mut HashMap<String, LogicalPlan>, ) -> Result<LogicalPlan> { match relation { TableFactor::Table { name, alias, .. } => { let table_name = name.to_string(); let cte = ctes.get(&table_name); match ( cte, self.schema_provider.get_table_provider(name.try_into()?), ) { (Some(cte_plan), _) => Ok(cte_plan.clone()), (_, Some(provider)) => LogicalPlanBuilder::scan( // take alias into account to support `JOIN table1 as table2` alias .as_ref() .map(|a| a.name.value.as_str()) .unwrap_or(&table_name), provider, None, )? .build(), (None, None) => Err(DataFusionError::Plan(format!( "Table or CTE with name '{}' not found", name ))), } } TableFactor::Derived { subquery, alias, .. } => self.query_to_plan_with_alias( subquery, alias.as_ref().map(|a| a.name.value.to_string()), ctes, ), TableFactor::NestedJoin(table_with_joins) => { self.plan_table_with_joins(table_with_joins, ctes) } // @todo Support TableFactory::TableFunction? 
_ => Err(DataFusionError::NotImplemented(format!( "Unsupported ast node {:?} in create_relation", relation ))), } } /// Generate a logic plan from an SQL select fn select_to_plan( &self, select: &Select, ctes: &mut HashMap<String, LogicalPlan>, ) -> Result<LogicalPlan> { let plans = self.plan_from_tables(&select.from, ctes)?; let plan = match &select.selection { Some(predicate_expr) => { // build join schema let mut fields = vec![]; for plan in &plans { fields.extend_from_slice(plan.schema().fields()); } let join_schema = DFSchema::new(fields)?; let filter_expr = self.sql_to_rex(predicate_expr, &join_schema)?; // look for expressions of the form `<column> = <column>` let mut possible_join_keys = vec![]; extract_possible_join_keys(&filter_expr, &mut possible_join_keys)?; let mut all_join_keys = HashSet::new(); let mut left = plans[0].clone(); for right in plans.iter().skip(1) { let left_schema = left.schema(); let right_schema = right.schema(); let mut join_keys = vec![]; for (l, r) in &possible_join_keys { if left_schema.field_from_qualified_column(l).is_ok() && right_schema.field_from_qualified_column(r).is_ok() { join_keys.push((l.clone(), r.clone())); } else if left_schema.field_from_qualified_column(r).is_ok() && right_schema.field_from_qualified_column(l).is_ok() { join_keys.push((r.clone(), l.clone())); } } if join_keys.is_empty() { left = LogicalPlanBuilder::from(left).cross_join(right)?.build()?; } else { let left_keys: Vec<Column> = join_keys.iter().map(|(l, _)| l.clone()).collect(); let right_keys: Vec<Column> = join_keys.iter().map(|(_, r)| r.clone()).collect(); let builder = LogicalPlanBuilder::from(left); left = builder .join(right, JoinType::Inner, left_keys, right_keys)? .build()?; } all_join_keys.extend(join_keys); } // remove join expressions from filter match remove_join_expressions(&filter_expr, &all_join_keys)? { Some(filter_expr) => { LogicalPlanBuilder::from(left).filter(filter_expr)?.build() } _ => Ok(left), } } None => { if plans.len() == 1 { Ok(plans[0].clone()) } else { let mut left = plans[0].clone(); for right in plans.iter().skip(1) { left = LogicalPlanBuilder::from(left).cross_join(right)?.build()?; } Ok(left) } } }; let plan = plan?; // The SELECT expressions, with wildcards expanded. let select_exprs = self.prepare_select_exprs(&plan, &select.projection)?; // having and group by clause may reference aliases defined in select projection let projected_plan = self.project(plan.clone(), select_exprs.clone())?; let mut combined_schema = (**projected_plan.schema()).clone(); combined_schema.merge(plan.schema()); // this alias map is resolved and looked up in both having exprs and group by exprs let alias_map = extract_aliases(&select_exprs); // Optionally the HAVING expression. let having_expr_opt = select .having .as_ref() .map::<Result<Expr>, _>(|having_expr| { let having_expr = self.sql_expr_to_logical_expr(having_expr, &combined_schema)?; // This step "dereferences" any aliases in the HAVING clause. // // This is how we support queries with HAVING expressions that // refer to aliased columns. // // For example: // // SELECT c1 AS m FROM t HAVING m > 10; // SELECT c1, MAX(c2) AS m FROM t GROUP BY c1 HAVING m > 10; // // are rewritten as, respectively: // // SELECT c1 AS m FROM t HAVING c1 > 10; // SELECT c1, MAX(c2) AS m FROM t GROUP BY c1 HAVING MAX(c2) > 10; // resolve_aliases_to_exprs(&having_expr, &alias_map) }) .transpose()?; // The outer expressions we will search through for // aggregates. Aggregates may be sourced from the SELECT... 
let mut aggr_expr_haystack = select_exprs.clone(); // ... or from the HAVING. if let Some(having_expr) = &having_expr_opt { aggr_expr_haystack.push(having_expr.clone()); } // All of the aggregate expressions (deduplicated). let aggr_exprs = find_aggregate_exprs(&aggr_expr_haystack); let group_by_exprs = select .group_by .iter() .map(|e| { let group_by_expr = self.sql_expr_to_logical_expr(e, &combined_schema)?; let group_by_expr = resolve_aliases_to_exprs(&group_by_expr, &alias_map)?; let group_by_expr = resolve_positions_to_exprs(&group_by_expr, &select_exprs) .unwrap_or(group_by_expr); self.validate_schema_satisfies_exprs( plan.schema(), &[group_by_expr.clone()], )?; Ok(group_by_expr) }) .collect::<Result<Vec<Expr>>>()?; let (plan, select_exprs_post_aggr, having_expr_post_aggr_opt) = if !group_by_exprs .is_empty() || !aggr_exprs.is_empty() { self.aggregate( plan, &select_exprs, &having_expr_opt, group_by_exprs, aggr_exprs, )? } else { if let Some(having_expr) = &having_expr_opt { let available_columns = select_exprs .iter() .map(|expr| expr_as_column_expr(expr, &plan)) .collect::<Result<Vec<Expr>>>()?; // Ensure the HAVING expression is using only columns // provided by the SELECT. if !can_columns_satisfy_exprs(&available_columns, &[having_expr.clone()])? { return Err(DataFusionError::Plan( "Having references column(s) not provided by the select" .to_owned(), )); } } (plan, select_exprs, having_expr_opt) }; let plan = if let Some(having_expr_post_aggr) = having_expr_post_aggr_opt { LogicalPlanBuilder::from(plan) .filter(having_expr_post_aggr)? .build()? } else { plan }; // window function let window_func_exprs = find_window_exprs(&select_exprs_post_aggr); let plan = if window_func_exprs.is_empty() { plan } else { self.window(plan, window_func_exprs)? }; let plan = if select.distinct { return LogicalPlanBuilder::from(plan) .aggregate(select_exprs_post_aggr, vec![])? .build(); } else { plan }; self.project(plan, select_exprs_post_aggr) } /// Returns the `Expr`'s corresponding to a SQL query's SELECT expressions. /// /// Wildcards are expanded into the concrete list of columns. fn prepare_select_exprs( &self, plan: &LogicalPlan, projection: &[SelectItem], ) -> Result<Vec<Expr>> { let input_schema = plan.schema(); Ok(projection .iter() .map(|expr| self.sql_select_to_rex(expr, input_schema)) .collect::<Result<Vec<Expr>>>()? 
.iter() .flat_map(|expr| expand_wildcard(expr, input_schema)) .collect::<Vec<Expr>>()) } /// Wrap a plan in a projection fn project(&self, input: LogicalPlan, expr: Vec<Expr>) -> Result<LogicalPlan> { self.validate_schema_satisfies_exprs(input.schema(), &expr)?; LogicalPlanBuilder::from(input).project(expr)?.build() } /// Wrap a plan in a window fn window(&self, input: LogicalPlan, window_exprs: Vec<Expr>) -> Result<LogicalPlan> { let mut plan = input; let mut groups = group_window_expr_by_sort_keys(&window_exprs)?; // sort by sort_key len descending, so that more deeply sorted plans gets nested further // down as children; to further mimic the behavior of PostgreSQL, we want stable sort // and a reverse so that tieing sort keys are reversed in order; note that by this rule // if there's an empty over, it'll be at the top level groups.sort_by(|(key_a, _), (key_b, _)| key_a.len().cmp(&key_b.len())); groups.reverse(); for (_, exprs) in groups { let window_exprs = exprs.into_iter().cloned().collect::<Vec<_>>(); // the partition and sort itself is done at physical level, see physical_planner's // fn create_initial_plan plan = LogicalPlanBuilder::from(plan) .window(window_exprs)? .build()?; } Ok(plan) } /// Wrap a plan in an aggregate fn aggregate( &self, input: LogicalPlan, select_exprs: &[Expr], having_expr_opt: &Option<Expr>, group_by_exprs: Vec<Expr>, aggr_exprs: Vec<Expr>, ) -> Result<(LogicalPlan, Vec<Expr>, Option<Expr>)> { let aggr_projection_exprs = group_by_exprs .iter() .chain(aggr_exprs.iter()) .cloned() .collect::<Vec<Expr>>(); let plan = LogicalPlanBuilder::from(input.clone()) .aggregate(group_by_exprs, aggr_exprs)? .build()?; // After aggregation, these are all of the columns that will be // available to next phases of planning. let column_exprs_post_aggr = aggr_projection_exprs .iter() .map(|expr| expr_as_column_expr(expr, &input)) .collect::<Result<Vec<Expr>>>()?; // Rewrite the SELECT expression to use the columns produced by the // aggregation. let select_exprs_post_aggr = select_exprs .iter() .map(|expr| rebase_expr(expr, &aggr_projection_exprs, &input)) .collect::<Result<Vec<Expr>>>()?; if !can_columns_satisfy_exprs(&column_exprs_post_aggr, &select_exprs_post_aggr)? { return Err(DataFusionError::Plan( "Projection references non-aggregate values".to_owned(), )); } // Rewrite the HAVING expression to use the columns produced by the // aggregation. let having_expr_post_aggr_opt = if let Some(having_expr) = having_expr_opt { let having_expr_post_aggr = rebase_expr(having_expr, &aggr_projection_exprs, &input)?; if !can_columns_satisfy_exprs( &column_exprs_post_aggr, &[having_expr_post_aggr.clone()], )? { return Err(DataFusionError::Plan( "Having references non-aggregate values".to_owned(), )); } Some(having_expr_post_aggr) } else { None }; Ok((plan, select_exprs_post_aggr, having_expr_post_aggr_opt)) } /// Wrap a plan in a limit fn limit(&self, input: LogicalPlan, limit: &Option<SQLExpr>) -> Result<LogicalPlan> { match *limit { Some(ref limit_expr) => { let n = match self.sql_to_rex(limit_expr, input.schema())? 
                {
                    Expr::Literal(ScalarValue::Int64(Some(n))) => Ok(n as usize),
                    _ => Err(DataFusionError::Plan(
                        "Unexpected expression for LIMIT clause".to_string(),
                    )),
                }?;

                LogicalPlanBuilder::from(input).limit(n)?.build()
            }
            _ => Ok(input),
        }
    }

    /// Wrap the logical plan in a sort
    fn order_by(
        &self,
        plan: LogicalPlan,
        order_by: &[OrderByExpr],
    ) -> Result<LogicalPlan> {
        if order_by.is_empty() {
            return Ok(plan);
        }

        let order_by_rex = order_by
            .iter()
            .map(|e| self.order_by_to_sort_expr(e, plan.schema()))
            .collect::<Result<Vec<_>>>()?;

        LogicalPlanBuilder::from(plan).sort(order_by_rex)?.build()
    }

    /// Convert a SQL OrderByExpr to an Expr::Sort
    fn order_by_to_sort_expr(&self, e: &OrderByExpr, schema: &DFSchema) -> Result<Expr> {
        Ok(Expr::Sort {
            expr: Box::new(self.sql_expr_to_logical_expr(&e.expr, schema)?),
            // ascending by default
            asc: e.asc.unwrap_or(true),
            // nulls first by default, to be consistent with Spark
            nulls_first: e.nulls_first.unwrap_or(true),
        })
    }

    /// Validate that the schema provides all of the columns referenced in the expressions.
    fn validate_schema_satisfies_exprs(
        &self,
        schema: &DFSchema,
        exprs: &[Expr],
    ) -> Result<()> {
        find_column_exprs(exprs)
            .iter()
            .try_for_each(|col| match col {
                Expr::Column(col) => {
                    match &col.relation {
                        Some(r) => schema.field_with_qualified_name(r, &col.name),
                        None => schema.field_with_unqualified_name(&col.name),
                    }
                    .map_err(|_| {
                        DataFusionError::Plan(format!(
                            "Invalid identifier '{}' for schema {}",
                            col, schema
                        ))
                    })?;
                    Ok(())
                }
                _ => Err(DataFusionError::Internal("Not a column".to_string())),
            })
    }

    /// Generate a relational expression from a select SQL expression
    fn sql_select_to_rex(&self, sql: &SelectItem, schema: &DFSchema) -> Result<Expr> {
        match sql {
            SelectItem::UnnamedExpr(expr) => self.sql_to_rex(expr, schema),
            SelectItem::ExprWithAlias { expr, alias } => Ok(Alias(
                Box::new(self.sql_to_rex(expr, schema)?),
                alias.value.clone(),
            )),
            SelectItem::Wildcard => Ok(Expr::Wildcard),
            SelectItem::QualifiedWildcard(_) => Err(DataFusionError::NotImplemented(
                "Qualified wildcards are not supported".to_string(),
            )),
        }
    }

    /// Generate a relational expression from a SQL expression
    pub fn sql_to_rex(&self, sql: &SQLExpr, schema: &DFSchema) -> Result<Expr> {
        let expr = self.sql_expr_to_logical_expr(sql, schema)?;
        self.validate_schema_satisfies_exprs(schema, &[expr.clone()])?;
        Ok(expr)
    }

    fn sql_fn_arg_to_logical_expr(
        &self,
        sql: &FunctionArg,
        schema: &DFSchema,
    ) -> Result<Expr> {
        match sql {
            FunctionArg::Named { name: _, arg } => {
                self.sql_expr_to_logical_expr(arg, schema)
            }
            FunctionArg::Unnamed(value) => self.sql_expr_to_logical_expr(value, schema),
        }
    }

    fn sql_expr_to_logical_expr(&self, sql: &SQLExpr, schema: &DFSchema) -> Result<Expr> {
        match sql {
            SQLExpr::Value(Value::Number(n, _)) => match n.parse::<i64>() {
                Ok(n) => Ok(lit(n)),
                Err(_) => Ok(lit(n.parse::<f64>().unwrap())),
            },
            SQLExpr::Value(Value::SingleQuotedString(ref s)) => Ok(lit(s.clone())),
            SQLExpr::Value(Value::Boolean(n)) => Ok(lit(*n)),
            SQLExpr::Value(Value::Null) => Ok(Expr::Literal(ScalarValue::Utf8(None))),
            SQLExpr::Extract { field, expr } => Ok(Expr::ScalarFunction {
                fun: functions::BuiltinScalarFunction::DatePart,
                args: vec![
                    Expr::Literal(ScalarValue::Utf8(Some(format!("{}", field)))),
                    self.sql_expr_to_logical_expr(expr, schema)?,
                ],
            }),

            SQLExpr::Value(Value::Interval {
                value,
                leading_field,
                leading_precision,
                last_field,
                fractional_seconds_precision,
            }) => self.sql_interval_to_literal(
                value,
                leading_field,
                leading_precision,
                last_field,
                fractional_seconds_precision,
            ),

            SQLExpr::Identifier(ref id) => {
                if
id.value.starts_with('@') { let var_names = vec![id.value.clone()]; Ok(Expr::ScalarVariable(var_names)) } else { Ok(Expr::Column( schema .field_with_unqualified_name(&id.value)? .qualified_column(), )) } } SQLExpr::CompoundIdentifier(ids) => { let mut var_names = vec![]; for id in ids { var_names.push(id.value.clone()); } if &var_names[0][0..1] == "@" { Ok(Expr::ScalarVariable(var_names)) } else if var_names.len() == 2 { // table.column identifier let name = var_names.pop().unwrap(); let relation = Some(var_names.pop().unwrap()); Ok(Expr::Column(Column { relation, name })) } else { Err(DataFusionError::NotImplemented(format!( "Unsupported compound identifier '{:?}'", var_names, ))) } } SQLExpr::Wildcard => Ok(Expr::Wildcard), SQLExpr::Case { operand, conditions, results, else_result, } => { let expr = if let Some(e) = operand { Some(Box::new(self.sql_expr_to_logical_expr(e, schema)?)) } else { None }; let when_expr = conditions .iter() .map(|e| self.sql_expr_to_logical_expr(e, schema)) .collect::<Result<Vec<_>>>()?; let then_expr = results .iter() .map(|e| self.sql_expr_to_logical_expr(e, schema)) .collect::<Result<Vec<_>>>()?; let else_expr = if let Some(e) = else_result { Some(Box::new(self.sql_expr_to_logical_expr(e, schema)?)) } else { None }; Ok(Expr::Case { expr, when_then_expr: when_expr .iter() .zip(then_expr.iter()) .map(|(w, t)| (Box::new(w.to_owned()), Box::new(t.to_owned()))) .collect(), else_expr, }) } SQLExpr::Cast { ref expr, ref data_type, } => Ok(Expr::Cast { expr: Box::new(self.sql_expr_to_logical_expr(expr, schema)?), data_type: convert_data_type(data_type)?, }), SQLExpr::TryCast { ref expr, ref data_type, } => Ok(Expr::TryCast { expr: Box::new(self.sql_expr_to_logical_expr(expr, schema)?), data_type: convert_data_type(data_type)?, }), SQLExpr::TypedString { ref data_type, ref value, } => Ok(Expr::Cast { expr: Box::new(lit(&**value)), data_type: convert_data_type(data_type)?, }), SQLExpr::IsNull(ref expr) => Ok(Expr::IsNull(Box::new( self.sql_expr_to_logical_expr(expr, schema)?, ))), SQLExpr::IsNotNull(ref expr) => Ok(Expr::IsNotNull(Box::new( self.sql_expr_to_logical_expr(expr, schema)?, ))), SQLExpr::UnaryOp { ref op, ref expr } => match op { UnaryOperator::Not => Ok(Expr::Not(Box::new( self.sql_expr_to_logical_expr(expr, schema)?, ))), UnaryOperator::Plus => Ok(self.sql_expr_to_logical_expr(expr, schema)?), UnaryOperator::Minus => { match expr.as_ref() { // optimization: if it's a number literal, we apply the negative operator // here directly to calculate the new literal. 
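                        // For example, `-42` folds directly to lit(-42_i64) and
                        // `-4.5` to lit(-4.5_f64) here, rather than wrapping the
                        // literal in Expr::Negative.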
                        SQLExpr::Value(Value::Number(n, _)) => match n.parse::<i64>() {
                            Ok(n) => Ok(lit(-n)),
                            Err(_) => Ok(lit(-n
                                .parse::<f64>()
                                .map_err(|_e| {
                                    DataFusionError::Internal(format!(
                                        "negative operator can only be applied to integer and float operands, got: {}",
                                        n
                                    ))
                                })?)),
                        },
                        // Not a literal; apply the negative operator to the expression.
                        _ => Ok(Expr::Negative(Box::new(
                            self.sql_expr_to_logical_expr(expr, schema)?,
                        ))),
                    }
                }
                _ => Err(DataFusionError::NotImplemented(format!(
                    "Unsupported SQL unary operator {:?}",
                    op
                ))),
            },

            SQLExpr::Between {
                ref expr,
                ref negated,
                ref low,
                ref high,
            } => Ok(Expr::Between {
                expr: Box::new(self.sql_expr_to_logical_expr(expr, schema)?),
                negated: *negated,
                low: Box::new(self.sql_expr_to_logical_expr(low, schema)?),
                high: Box::new(self.sql_expr_to_logical_expr(high, schema)?),
            }),

            SQLExpr::InList {
                ref expr,
                ref list,
                ref negated,
            } => {
                let list_expr = list
                    .iter()
                    .map(|e| self.sql_expr_to_logical_expr(e, schema))
                    .collect::<Result<Vec<_>>>()?;

                Ok(Expr::InList {
                    expr: Box::new(self.sql_expr_to_logical_expr(expr, schema)?),
                    list: list_expr,
                    negated: *negated,
                })
            }

            SQLExpr::BinaryOp {
                ref left,
                ref op,
                ref right,
            } => {
                let operator = match *op {
                    BinaryOperator::Gt => Ok(Operator::Gt),
                    BinaryOperator::GtEq => Ok(Operator::GtEq),
                    BinaryOperator::Lt => Ok(Operator::Lt),
                    BinaryOperator::LtEq => Ok(Operator::LtEq),
                    BinaryOperator::Eq => Ok(Operator::Eq),
                    BinaryOperator::NotEq => Ok(Operator::NotEq),
                    BinaryOperator::Plus => Ok(Operator::Plus),
                    BinaryOperator::Minus => Ok(Operator::Minus),
                    BinaryOperator::Multiply => Ok(Operator::Multiply),
                    BinaryOperator::Divide => Ok(Operator::Divide),
                    BinaryOperator::Modulus => Ok(Operator::Modulus),
                    BinaryOperator::And => Ok(Operator::And),
                    BinaryOperator::Or => Ok(Operator::Or),
                    BinaryOperator::Like => Ok(Operator::Like),
                    BinaryOperator::NotLike => Ok(Operator::NotLike),
                    _ => Err(DataFusionError::NotImplemented(format!(
                        "Unsupported SQL binary operator {:?}",
                        op
                    ))),
                }?;

                Ok(Expr::BinaryExpr {
                    left: Box::new(self.sql_expr_to_logical_expr(left, schema)?),
                    op: operator,
                    right: Box::new(self.sql_expr_to_logical_expr(right, schema)?),
                })
            }

            SQLExpr::Function(function) => {
                let name = if function.name.0.len() > 1 {
                    // DF doesn't handle compound identifiers
                    // (e.g.
"foo.bar") for function names yet function.name.to_string() } else { // if there is a quote style, then don't normalize // the name, otherwise normalize to lowercase let ident = &function.name.0[0]; match ident.quote_style { Some(_) => ident.value.clone(), None => ident.value.to_ascii_lowercase(), } }; // first, scalar built-in if let Ok(fun) = functions::BuiltinScalarFunction::from_str(&name) { let args = self.function_args_to_expr(function, schema)?; return Ok(Expr::ScalarFunction { fun, args }); }; // then, window function if let Some(window) = &function.over { let partition_by = window .partition_by .iter() .map(|e| self.sql_expr_to_logical_expr(e, schema)) .collect::<Result<Vec<_>>>()?; let order_by = window .order_by .iter() .map(|e| self.order_by_to_sort_expr(e, schema)) .collect::<Result<Vec<_>>>()?; let window_frame = window .window_frame .as_ref() .map(|window_frame| { let window_frame: WindowFrame = window_frame.clone().try_into()?; if WindowFrameUnits::Range == window_frame.units && order_by.len() != 1 { Err(DataFusionError::Plan(format!( "With window frame of type RANGE, the order by expression must be of length 1, got {}", order_by.len()))) } else { Ok(window_frame) } }) .transpose()?; let fun = window_functions::WindowFunction::from_str(&name)?; match fun { window_functions::WindowFunction::AggregateFunction( aggregate_fun, ) => { return Ok(Expr::WindowFunction { fun: window_functions::WindowFunction::AggregateFunction( aggregate_fun.clone(), ), args: self.aggregate_fn_to_expr( &aggregate_fun, function, schema, )?, partition_by, order_by, window_frame, }); } window_functions::WindowFunction::BuiltInWindowFunction( window_fun, ) => { return Ok(Expr::WindowFunction { fun: window_functions::WindowFunction::BuiltInWindowFunction( window_fun, ), args:self.function_args_to_expr(function, schema)?, partition_by, order_by, window_frame, }); } } } // next, aggregate built-ins if let Ok(fun) = aggregates::AggregateFunction::from_str(&name) { let args = self.aggregate_fn_to_expr(&fun, function, schema)?; return Ok(Expr::AggregateFunction { fun, distinct: function.distinct, args, }); }; // finally, user-defined functions (UDF) and UDAF match self.schema_provider.get_function_meta(&name) { Some(fm) => { let args = self.function_args_to_expr(function, schema)?; Ok(Expr::ScalarUDF { fun: fm, args }) } None => match self.schema_provider.get_aggregate_meta(&name) { Some(fm) => { let args = self.function_args_to_expr(function, schema)?; Ok(Expr::AggregateUDF { fun: fm, args }) } _ => Err(DataFusionError::Plan(format!( "Invalid function '{}'", name ))), }, } } SQLExpr::Nested(e) => self.sql_expr_to_logical_expr(e, schema), _ => Err(DataFusionError::NotImplemented(format!( "Unsupported ast node {:?} in sqltorel", sql ))), } } fn function_args_to_expr( &self, function: &sqlparser::ast::Function, schema: &DFSchema, ) -> Result<Vec<Expr>> { function .args .iter() .map(|a| self.sql_fn_arg_to_logical_expr(a, schema)) .collect::<Result<Vec<Expr>>>() } fn aggregate_fn_to_expr( &self, fun: &aggregates::AggregateFunction, function: &sqlparser::ast::Function, schema: &DFSchema, ) -> Result<Vec<Expr>> { if *fun == aggregates::AggregateFunction::Count { function .args .iter() .map(|a| match a { FunctionArg::Unnamed(SQLExpr::Value(Value::Number(_, _))) => { Ok(lit(1_u8)) } FunctionArg::Unnamed(SQLExpr::Wildcard) => Ok(lit(1_u8)), _ => self.sql_fn_arg_to_logical_expr(a, schema), }) .collect::<Result<Vec<Expr>>>() } else { self.function_args_to_expr(function, schema) } } fn sql_interval_to_literal( &self, 
        value: &str,
        leading_field: &Option<DateTimeField>,
        leading_precision: &Option<u64>,
        last_field: &Option<DateTimeField>,
        fractional_seconds_precision: &Option<u64>,
    ) -> Result<Expr> {
        if leading_precision.is_some() {
            return Err(DataFusionError::NotImplemented(format!(
                "Unsupported Interval Expression with leading_precision {:?}",
                leading_precision
            )));
        }

        if last_field.is_some() {
            return Err(DataFusionError::NotImplemented(format!(
                "Unsupported Interval Expression with last_field {:?}",
                last_field
            )));
        }

        if fractional_seconds_precision.is_some() {
            return Err(DataFusionError::NotImplemented(format!(
                "Unsupported Interval Expression with fractional_seconds_precision {:?}",
                fractional_seconds_precision
            )));
        }

        const SECONDS_PER_HOUR: f32 = 3_600_f32;
        const MILLIS_PER_SECOND: f32 = 1_000_f32;

        // We store the parts as whole numbers, which is why any fractional part
        // has to be carried over into the next smaller unit:
        // INTERVAL '0.5 MONTH' = 15 days, INTERVAL '1.5 MONTH' = 1 month 15 days
        // INTERVAL '0.5 DAY' = 12 hours, INTERVAL '1.5 DAY' = 1 day 12 hours
        let align_interval_parts = |month_part: f32,
                                    mut day_part: f32,
                                    mut millis_part: f32|
         -> (i32, i32, f32) {
            // Carry the fractional month into days; fractional months are not
            // supported by the Arrow interval types.
            day_part += (month_part - (month_part as i32) as f32) * 30_f32;

            // Carry the fractional day into milliseconds.
            millis_part += (day_part - ((day_part as i32) as f32))
                * 24_f32
                * SECONDS_PER_HOUR
                * MILLIS_PER_SECOND;

            (month_part as i32, day_part as i32, millis_part)
        };

        let calculate_from_part = |interval_period_str: &str,
                                   interval_type: &str|
         -> Result<(i32, i32, f32)> {
            // TODO: it would be better to use Decimal here to protect against
            // rounding errors; waiting on https://github.com/apache/arrow/pull/9232
            let interval_period = match f32::from_str(interval_period_str) {
                Ok(n) => n,
                Err(_) => {
                    return Err(DataFusionError::SQL(ParserError(format!(
                        "Unsupported Interval Expression with value {:?}",
                        value
                    ))))
                }
            };

            if interval_period > (i32::MAX as f32) {
                return Err(DataFusionError::NotImplemented(format!(
                    "Interval field value out of range: {:?}",
                    value
                )));
            }

            match interval_type.to_lowercase().as_str() {
                "year" => Ok(align_interval_parts(interval_period * 12_f32, 0.0, 0.0)),
                "month" => Ok(align_interval_parts(interval_period, 0.0, 0.0)),
                "day" | "days" => Ok(align_interval_parts(0.0, interval_period, 0.0)),
                "hour" | "hours" => {
                    Ok((0, 0, interval_period * SECONDS_PER_HOUR * MILLIS_PER_SECOND))
                }
                "minutes" | "minute" => {
                    Ok((0, 0, interval_period * 60_f32 * MILLIS_PER_SECOND))
                }
                "seconds" | "second" => Ok((0, 0, interval_period * MILLIS_PER_SECOND)),
                "milliseconds" | "millisecond" => Ok((0, 0, interval_period)),
                _ => Err(DataFusionError::NotImplemented(format!(
                    "Invalid input syntax for type interval: {:?}",
                    value
                ))),
            }
        };

        let mut result_month: i64 = 0;
        let mut result_days: i64 = 0;
        let mut result_millis: i64 = 0;

        let mut parts = value.split_whitespace();

        while let Some(interval_period_str) = parts.next() {
            let leading_field = leading_field
                .as_ref()
                .map(|dt| dt.to_string())
                .unwrap_or_else(|| "second".to_string());

            let unit = parts
                .next()
                .map(|part| part.to_string())
                .unwrap_or(leading_field);

            let (diff_month, diff_days, diff_millis) =
                calculate_from_part(interval_period_str, &unit)?;

            result_month += diff_month as i64;

            if result_month > (i32::MAX as i64) {
                return Err(DataFusionError::NotImplemented(format!(
                    "Interval field value out of range: {:?}",
                    value
                )));
            }

            result_days += diff_days as i64;

            if result_days > (i32::MAX as i64) {
                return Err(DataFusionError::NotImplemented(format!(
                    "Interval field value out of range: {:?}",
                    value
                )));
            }

            result_millis += diff_millis as i64;

            if result_millis > (i32::MAX as i64) {
                return Err(DataFusionError::NotImplemented(format!(
                    "Interval field value out of range: {:?}",
                    value
                )));
            }
        }

        // Intervals are tricky: 1 day is not always 24 hours (time zones), 1 year
        // is not 365 (or 364) days, and 30 days are not 1 month. The faithful way
        // to store and compute with an interval is to keep it exactly as it was
        // defined. Arrow, however, supports only two interval types, YearMonth
        // (months) and DayTime (days and time), so complex intervals cannot be
        // stored. As a workaround, one can write e.g.
        // SELECT (NOW() + INTERVAL '1 year') + INTERVAL '1 day';
        if result_month != 0 && (result_days != 0 || result_millis != 0) {
            return Err(DataFusionError::NotImplemented(format!(
                "DF does not support intervals that have both a Year/Month part as well as Days/Hours/Mins/Seconds: {:?}. Hint: try breaking the interval into two parts, one with Year/Month and the other with Days/Hours/Mins/Seconds - e.g. (NOW() + INTERVAL '1 year') + INTERVAL '1 day'",
                value
            )));
        }

        if result_month != 0 {
            return Ok(Expr::Literal(ScalarValue::IntervalYearMonth(Some(
                result_month as i32,
            ))));
        }

        // Pack days into the high 32 bits and milliseconds into the low 32 bits
        // of the IntervalDayTime representation.
        let result: i64 = (result_days << 32) | result_millis;
        Ok(Expr::Literal(ScalarValue::IntervalDayTime(Some(result))))
    }

    fn show_variable_to_plan(&self, variable: &[Ident]) -> Result<LogicalPlan> {
        // Special case: SHOW TABLES
        let variable = ObjectName(variable.to_vec()).to_string();
        if variable.as_str().eq_ignore_ascii_case("tables") {
            if self.has_table("information_schema", "tables") {
                let rewrite =
                    DFParser::parse_sql("SELECT * FROM information_schema.tables;")?;
                self.statement_to_plan(&rewrite[0])
            } else {
                Err(DataFusionError::Plan(
                    "SHOW TABLES is not supported unless information_schema is enabled"
                        .to_string(),
                ))
            }
        } else {
            Err(DataFusionError::NotImplemented(format!(
                "SHOW {} not implemented. Supported syntax: SHOW <TABLES>",
                variable
            )))
        }
    }

    fn show_columns_to_plan(
        &self,
        extended: bool,
        full: bool,
        table_name: &ObjectName,
        filter: Option<&ShowStatementFilter>,
    ) -> Result<LogicalPlan> {
        if filter.is_some() {
            return Err(DataFusionError::Plan(
                "SHOW COLUMNS with WHERE or LIKE is not supported".to_string(),
            ));
        }

        if !self.has_table("information_schema", "columns") {
            return Err(DataFusionError::Plan(
                "SHOW COLUMNS is not supported unless information_schema is enabled"
                    .to_string(),
            ));
        }
        if self
            .schema_provider
            .get_table_provider(table_name.try_into()?)
.is_none() { return Err(DataFusionError::Plan(format!( "Unknown relation for SHOW COLUMNS: {}", table_name ))); } // Figure out the where clause let columns = vec!["table_name", "table_schema", "table_catalog"].into_iter(); let where_clause = table_name .0 .iter() .rev() .zip(columns) .map(|(ident, column_name)| { format!(r#"{} = '{}'"#, column_name, ident.to_string()) }) .collect::<Vec<_>>() .join(" AND "); // treat both FULL and EXTENDED as the same let select_list = if full || extended { "*" } else { "table_catalog, table_schema, table_name, column_name, data_type, is_nullable" }; let query = format!( "SELECT {} FROM information_schema.columns WHERE {}", select_list, where_clause ); let rewrite = DFParser::parse_sql(&query)?; self.statement_to_plan(&rewrite[0]) } /// Return true if there is a table provider available for "schema.table" fn has_table(&self, schema: &str, table: &str) -> bool { let tables_reference = TableReference::Partial { schema, table }; self.schema_provider .get_table_provider(tables_reference) .is_some() } } /// Remove join expressions from a filter expression fn remove_join_expressions( expr: &Expr, join_columns: &HashSet<(Column, Column)>, ) -> Result<Option<Expr>> { match expr { Expr::BinaryExpr { left, op, right } => match op { Operator::Eq => match (left.as_ref(), right.as_ref()) { (Expr::Column(l), Expr::Column(r)) => { if join_columns.contains(&(l.clone(), r.clone())) || join_columns.contains(&(r.clone(), l.clone())) { Ok(None) } else { Ok(Some(expr.clone())) } } _ => Ok(Some(expr.clone())), }, Operator::And => { let l = remove_join_expressions(left, join_columns)?; let r = remove_join_expressions(right, join_columns)?; match (l, r) { (Some(ll), Some(rr)) => Ok(Some(and(ll, rr))), (Some(ll), _) => Ok(Some(ll)), (_, Some(rr)) => Ok(Some(rr)), _ => Ok(None), } } _ => Ok(Some(expr.clone())), }, _ => Ok(Some(expr.clone())), } } /// Parse equijoin ON condition which could be a single Eq or multiple conjunctive Eqs /// /// Examples /// /// foo = bar /// foo = bar AND bar = baz AND ... 
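/// foo = bar AND baz = qux  (added illustration: keys from two distinct column pairs)
///
/// Anything other than a conjunction of column equalities (e.g. foo > bar)
/// is rejected below with a ParserError.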
/// fn extract_join_keys(expr: &Expr, accum: &mut Vec<(Column, Column)>) -> Result<()> { match expr { Expr::BinaryExpr { left, op, right } => match op { Operator::Eq => match (left.as_ref(), right.as_ref()) { (Expr::Column(l), Expr::Column(r)) => { accum.push((l.clone(), r.clone())); Ok(()) } other => Err(DataFusionError::SQL(ParserError(format!( "Unsupported expression '{:?}' in JOIN condition", other )))), }, Operator::And => { extract_join_keys(left, accum)?; extract_join_keys(right, accum) } other => Err(DataFusionError::SQL(ParserError(format!( "Unsupported expression '{:?}' in JOIN condition", other )))), }, other => Err(DataFusionError::SQL(ParserError(format!( "Unsupported expression '{:?}' in JOIN condition", other )))), } } /// Extract join keys from a WHERE clause fn extract_possible_join_keys( expr: &Expr, accum: &mut Vec<(Column, Column)>, ) -> Result<()> { match expr { Expr::BinaryExpr { left, op, right } => match op { Operator::Eq => match (left.as_ref(), right.as_ref()) { (Expr::Column(l), Expr::Column(r)) => { accum.push((l.clone(), r.clone())); Ok(()) } _ => Ok(()), }, Operator::And => { extract_possible_join_keys(left, accum)?; extract_possible_join_keys(right, accum) } _ => Ok(()), }, _ => Ok(()), } } /// Convert SQL data type to relational representation of data type pub fn convert_data_type(sql: &SQLDataType) -> Result<DataType> { match sql { SQLDataType::Boolean => Ok(DataType::Boolean), SQLDataType::SmallInt => Ok(DataType::Int16), SQLDataType::Int => Ok(DataType::Int32), SQLDataType::BigInt => Ok(DataType::Int64), SQLDataType::Float(_) | SQLDataType::Real => Ok(DataType::Float64), SQLDataType::Double => Ok(DataType::Float64), SQLDataType::Char(_) | SQLDataType::Varchar(_) => Ok(DataType::Utf8), SQLDataType::Timestamp => Ok(DataType::Timestamp(TimeUnit::Nanosecond, None)), SQLDataType::Date => Ok(DataType::Date32), other => Err(DataFusionError::NotImplemented(format!( "Unsupported SQL type {:?}", other ))), } } #[cfg(test)] mod tests { use super::*; use crate::datasource::empty::EmptyTable; use crate::{logical_plan::create_udf, sql::parser::DFParser}; use functions::ScalarFunctionImplementation; #[test] fn select_no_relation() { quick_test( "SELECT 1", "Projection: Int64(1)\ \n EmptyRelation", ); } #[test] fn select_column_does_not_exist() { let sql = "SELECT doesnotexist FROM person"; let err = logical_plan(sql).expect_err("query should have failed"); assert!(matches!( err, DataFusionError::Plan(msg) if msg.contains("No field with unqualified name 'doesnotexist'"), )); } #[test] fn select_repeated_column() { let sql = "SELECT age, age FROM person"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( r##"Plan("Projections require unique expression names but the expression \"#person.age\" at position 0 and \"#person.age\" at position 1 have the same name. Consider aliasing (\"AS\") one of them.")"##, format!("{:?}", err) ); } #[test] fn select_wildcard_with_repeated_column() { let sql = "SELECT *, age FROM person"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( r##"Plan("Projections require unique expression names but the expression \"#person.age\" at position 3 and \"#person.age\" at position 8 have the same name. 
Consider aliasing (\"AS\") one of them.")"##, format!("{:?}", err) ); } #[test] fn select_wildcard_with_repeated_column_but_is_aliased() { quick_test( "SELECT *, first_name AS fn from person", "Projection: #person.id, #person.first_name, #person.last_name, #person.age, #person.state, #person.salary, #person.birth_date, #person.😀, #person.first_name AS fn\ \n TableScan: person projection=None", ); } #[test] fn select_scalar_func_with_literal_no_relation() { quick_test( "SELECT sqrt(9)", "Projection: sqrt(Int64(9))\ \n EmptyRelation", ); } #[test] fn select_simple_filter() { let sql = "SELECT id, first_name, last_name \ FROM person WHERE state = 'CO'"; let expected = "Projection: #person.id, #person.first_name, #person.last_name\ \n Filter: #person.state Eq Utf8(\"CO\")\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_filter_column_does_not_exist() { let sql = "SELECT first_name FROM person WHERE doesnotexist = 'A'"; let err = logical_plan(sql).expect_err("query should have failed"); assert!(matches!( err, DataFusionError::Plan(msg) if msg.contains("No field with unqualified name 'doesnotexist'"), )); } #[test] fn select_filter_cannot_use_alias() { let sql = "SELECT first_name AS x FROM person WHERE x = 'A'"; let err = logical_plan(sql).expect_err("query should have failed"); assert!(matches!( err, DataFusionError::Plan(msg) if msg.contains("No field with unqualified name 'x'"), )); } #[test] fn select_neg_filter() { let sql = "SELECT id, first_name, last_name \ FROM person WHERE NOT state"; let expected = "Projection: #person.id, #person.first_name, #person.last_name\ \n Filter: NOT #person.state\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_compound_filter() { let sql = "SELECT id, first_name, last_name \ FROM person WHERE state = 'CO' AND age >= 21 AND age <= 65"; let expected = "Projection: #person.id, #person.first_name, #person.last_name\ \n Filter: #person.state Eq Utf8(\"CO\") And #person.age GtEq Int64(21) And #person.age LtEq Int64(65)\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn test_timestamp_filter() { let sql = "SELECT state FROM person WHERE birth_date < CAST (158412331400600000 as timestamp)"; let expected = "Projection: #person.state\ \n Filter: #person.birth_date Lt CAST(Int64(158412331400600000) AS Timestamp(Nanosecond, None))\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn test_date_filter() { let sql = "SELECT state FROM person WHERE birth_date < CAST ('2020-01-01' as date)"; let expected = "Projection: #person.state\ \n Filter: #person.birth_date Lt CAST(Utf8(\"2020-01-01\") AS Date32)\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_all_boolean_operators() { let sql = "SELECT age, first_name, last_name \ FROM person \ WHERE age = 21 \ AND age != 21 \ AND age > 21 \ AND age >= 21 \ AND age < 65 \ AND age <= 65"; let expected = "Projection: #person.age, #person.first_name, #person.last_name\ \n Filter: #person.age Eq Int64(21) \ And #person.age NotEq Int64(21) \ And #person.age Gt Int64(21) \ And #person.age GtEq Int64(21) \ And #person.age Lt Int64(65) \ And #person.age LtEq Int64(65)\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_between() { let sql = "SELECT state FROM person WHERE age BETWEEN 21 AND 65"; let expected = "Projection: #person.state\ \n Filter: #person.age BETWEEN Int64(21) AND Int64(65)\ \n TableScan: person projection=None"; 
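        // Added note: BETWEEN is planned as a single Expr::Between node (see
        // sql_expr_to_logical_expr) rather than being desugared into
        // `age >= 21 AND age <= 65` at planning time.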
quick_test(sql, expected); } #[test] fn select_between_negated() { let sql = "SELECT state FROM person WHERE age NOT BETWEEN 21 AND 65"; let expected = "Projection: #person.state\ \n Filter: #person.age NOT BETWEEN Int64(21) AND Int64(65)\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_nested() { let sql = "SELECT fn2, last_name FROM ( SELECT fn1 as fn2, last_name, birth_date FROM ( SELECT first_name AS fn1, last_name, birth_date, age FROM person ) )"; let expected = "Projection: #fn2, #person.last_name\ \n Projection: #fn1 AS fn2, #person.last_name, #person.birth_date\ \n Projection: #person.first_name AS fn1, #person.last_name, #person.birth_date, #person.age\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_nested_with_filters() { let sql = "SELECT fn1, age FROM ( SELECT first_name AS fn1, age FROM person WHERE age > 20 ) WHERE fn1 = 'X' AND age < 30"; let expected = "Projection: #fn1, #person.age\ \n Filter: #fn1 Eq Utf8(\"X\") And #person.age Lt Int64(30)\ \n Projection: #person.first_name AS fn1, #person.age\ \n Filter: #person.age Gt Int64(20)\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_with_having() { let sql = "SELECT id, age FROM person HAVING age > 100 AND age < 200"; let expected = "Projection: #person.id, #person.age\ \n Filter: #person.age Gt Int64(100) And #person.age Lt Int64(200)\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_with_having_referencing_column_not_in_select() { let sql = "SELECT id, age FROM person HAVING first_name = 'M'"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( "Plan(\"Having references column(s) not provided by the select\")", format!("{:?}", err) ); } #[test] fn select_with_having_referencing_column_nested_in_select_expression() { let sql = "SELECT id, age + 1 FROM person HAVING age > 100"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( "Plan(\"Having references column(s) not provided by the select\")", format!("{:?}", err) ); } #[test] fn select_with_having_with_aggregate_not_in_select() { let sql = "SELECT first_name FROM person HAVING MAX(age) > 100"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( "Plan(\"Projection references non-aggregate values\")", format!("{:?}", err) ); } #[test] fn select_aggregate_with_having_that_reuses_aggregate() { let sql = "SELECT MAX(age) FROM person HAVING MAX(age) < 30"; let expected = "Projection: #MAX(person.age)\ \n Filter: #MAX(person.age) Lt Int64(30)\ \n Aggregate: groupBy=[[]], aggr=[[MAX(#person.age)]]\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_aggregate_with_having_with_aggregate_not_in_select() { let sql = "SELECT MAX(age) FROM person HAVING MAX(first_name) > 'M'"; let expected = "Projection: #MAX(person.age)\ \n Filter: #MAX(person.first_name) Gt Utf8(\"M\")\ \n Aggregate: groupBy=[[]], aggr=[[MAX(#person.age), MAX(#person.first_name)]]\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_aggregate_with_having_referencing_column_not_in_select() { let sql = "SELECT COUNT(*) FROM person HAVING first_name = 'M'"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( "Plan(\"Having references non-aggregate values\")", format!("{:?}", err) ); } #[test] fn select_aggregate_aliased_with_having_referencing_aggregate_by_its_alias() { let sql = "SELECT 
MAX(age) as max_age FROM person HAVING max_age < 30"; // FIXME: add test for having in execution let expected = "Projection: #MAX(person.age) AS max_age\ \n Filter: #MAX(person.age) Lt Int64(30)\ \n Aggregate: groupBy=[[]], aggr=[[MAX(#person.age)]]\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_aggregate_aliased_with_having_that_reuses_aggregate_but_not_by_its_alias() { let sql = "SELECT MAX(age) as max_age FROM person HAVING MAX(age) < 30"; let expected = "Projection: #MAX(person.age) AS max_age\ \n Filter: #MAX(person.age) Lt Int64(30)\ \n Aggregate: groupBy=[[]], aggr=[[MAX(#person.age)]]\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_aggregate_with_group_by_with_having() { let sql = "SELECT first_name, MAX(age) FROM person GROUP BY first_name HAVING first_name = 'M'"; let expected = "Projection: #person.first_name, #MAX(person.age)\ \n Filter: #person.first_name Eq Utf8(\"M\")\ \n Aggregate: groupBy=[[#person.first_name]], aggr=[[MAX(#person.age)]]\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_aggregate_with_group_by_with_having_and_where() { let sql = "SELECT first_name, MAX(age) FROM person WHERE id > 5 GROUP BY first_name HAVING MAX(age) < 100"; let expected = "Projection: #person.first_name, #MAX(person.age)\ \n Filter: #MAX(person.age) Lt Int64(100)\ \n Aggregate: groupBy=[[#person.first_name]], aggr=[[MAX(#person.age)]]\ \n Filter: #person.id Gt Int64(5)\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_aggregate_with_group_by_with_having_and_where_filtering_on_aggregate_column( ) { let sql = "SELECT first_name, MAX(age) FROM person WHERE id > 5 AND age > 18 GROUP BY first_name HAVING MAX(age) < 100"; let expected = "Projection: #person.first_name, #MAX(person.age)\ \n Filter: #MAX(person.age) Lt Int64(100)\ \n Aggregate: groupBy=[[#person.first_name]], aggr=[[MAX(#person.age)]]\ \n Filter: #person.id Gt Int64(5) And #person.age Gt Int64(18)\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_aggregate_with_group_by_with_having_using_column_by_alias() { let sql = "SELECT first_name AS fn, MAX(age) FROM person GROUP BY first_name HAVING MAX(age) > 2 AND fn = 'M'"; let expected = "Projection: #person.first_name AS fn, #MAX(person.age)\ \n Filter: #MAX(person.age) Gt Int64(2) And #person.first_name Eq Utf8(\"M\")\ \n Aggregate: groupBy=[[#person.first_name]], aggr=[[MAX(#person.age)]]\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_aggregate_with_group_by_with_having_using_columns_with_and_without_their_aliases( ) { let sql = "SELECT first_name AS fn, MAX(age) AS max_age FROM person GROUP BY first_name HAVING MAX(age) > 2 AND max_age < 5 AND first_name = 'M' AND fn = 'N'"; let expected = "Projection: #person.first_name AS fn, #MAX(person.age) AS max_age\ \n Filter: #MAX(person.age) Gt Int64(2) And #MAX(person.age) Lt Int64(5) And #person.first_name Eq Utf8(\"M\") And #person.first_name Eq Utf8(\"N\")\ \n Aggregate: groupBy=[[#person.first_name]], aggr=[[MAX(#person.age)]]\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_aggregate_with_group_by_with_having_that_reuses_aggregate() { let sql = "SELECT first_name, MAX(age) FROM person GROUP BY first_name HAVING MAX(age) > 100"; let expected = "Projection: #person.first_name, #MAX(person.age)\ \n Filter: #MAX(person.age) Gt Int64(100)\ \n Aggregate: 
groupBy=[[#person.first_name]], aggr=[[MAX(#person.age)]]\ \n TableScan: person projection=None";
        quick_test(sql, expected);
    }

    #[test]
    fn select_aggregate_with_group_by_with_having_referencing_column_not_in_group_by() {
        let sql = "SELECT first_name, MAX(age) FROM person GROUP BY first_name HAVING MAX(age) > 10 AND last_name = 'M'";
        let err = logical_plan(sql).expect_err("query should have failed");
        assert_eq!(
            "Plan(\"Having references non-aggregate values\")",
            format!("{:?}", err)
        );
    }

    #[test]
    fn select_aggregate_with_group_by_with_having_that_reuses_aggregate_multiple_times() {
        let sql = "SELECT first_name, MAX(age) FROM person GROUP BY first_name HAVING MAX(age) > 100 AND MAX(age) < 200";
        let expected = "Projection: #person.first_name, #MAX(person.age)\ \n Filter: #MAX(person.age) Gt Int64(100) And #MAX(person.age) Lt Int64(200)\ \n Aggregate: groupBy=[[#person.first_name]], aggr=[[MAX(#person.age)]]\ \n TableScan: person projection=None";
        quick_test(sql, expected);
    }

    #[test]
    fn select_aggregate_with_group_by_with_having_using_aggregate_not_in_select() {
        let sql = "SELECT first_name, MAX(age) FROM person GROUP BY first_name HAVING MAX(age) > 100 AND MIN(id) < 50";
        let expected = "Projection: #person.first_name, #MAX(person.age)\ \n Filter: #MAX(person.age) Gt Int64(100) And #MIN(person.id) Lt Int64(50)\ \n Aggregate: groupBy=[[#person.first_name]], aggr=[[MAX(#person.age), MIN(#person.id)]]\ \n TableScan: person projection=None";
        quick_test(sql, expected);
    }

    #[test]
    fn select_aggregate_aliased_with_group_by_with_having_referencing_aggregate_by_its_alias() {
        let sql = "SELECT first_name, MAX(age) AS max_age FROM person GROUP BY first_name HAVING max_age > 100";
        let expected = "Projection: #person.first_name, #MAX(person.age) AS max_age\ \n Filter: #MAX(person.age) Gt Int64(100)\ \n Aggregate: groupBy=[[#person.first_name]], aggr=[[MAX(#person.age)]]\ \n TableScan: person projection=None";
        quick_test(sql, expected);
    }

    #[test]
    fn select_aggregate_compound_aliased_with_group_by_with_having_referencing_compound_aggregate_by_its_alias() {
        let sql = "SELECT first_name, MAX(age) + 1 AS max_age_plus_one FROM person GROUP BY first_name HAVING max_age_plus_one > 100";
        let expected = "Projection: #person.first_name, #MAX(person.age) Plus Int64(1) AS max_age_plus_one\ \n Filter: #MAX(person.age) Plus Int64(1) Gt Int64(100)\ \n Aggregate: groupBy=[[#person.first_name]], aggr=[[MAX(#person.age)]]\ \n TableScan: person projection=None";
        quick_test(sql, expected);
    }

    #[test]
    fn select_aggregate_with_group_by_with_having_using_derived_column_aggregate_not_in_select() {
        let sql = "SELECT first_name, MAX(age) FROM person GROUP BY first_name HAVING MAX(age) > 100 AND MIN(id - 2) < 50";
        let expected = "Projection: #person.first_name, #MAX(person.age)\ \n Filter: #MAX(person.age) Gt Int64(100) And #MIN(person.id Minus Int64(2)) Lt Int64(50)\ \n Aggregate: groupBy=[[#person.first_name]], aggr=[[MAX(#person.age), MIN(#person.id Minus Int64(2))]]\ \n TableScan: person projection=None";
        quick_test(sql, expected);
    }

    #[test]
    fn select_aggregate_with_group_by_with_having_using_count_star_not_in_select() {
        let sql = "SELECT first_name, MAX(age) FROM person GROUP BY first_name HAVING MAX(age) > 100 AND COUNT(*) < 50";
        let expected = "Projection: #person.first_name, #MAX(person.age)\ \n Filter: #MAX(person.age) Gt Int64(100) And #COUNT(UInt8(1)) Lt Int64(50)\ \n Aggregate: groupBy=[[#person.first_name]], aggr=[[MAX(#person.age), COUNT(UInt8(1))]]\ \n TableScan: person projection=None";
        quick_test(sql, expected);
    }
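    // Added note on the expected plans above: COUNT(*) prints as
    // COUNT(UInt8(1)) because aggregate_fn_to_expr rewrites both COUNT(*)
    // and COUNT(<numeric literal>) to lit(1_u8), so the aggregate counts a
    // constant instead of materializing every column.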
#[test] fn select_binary_expr() { let sql = "SELECT age + salary from person"; let expected = "Projection: #person.age Plus #person.salary\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_binary_expr_nested() { let sql = "SELECT (age + salary)/2 from person"; let expected = "Projection: #person.age Plus #person.salary Divide Int64(2)\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_wildcard_with_groupby() { quick_test( r#"SELECT * FROM person GROUP BY id, first_name, last_name, age, state, salary, birth_date, "😀""#, "Projection: #person.id, #person.first_name, #person.last_name, #person.age, #person.state, #person.salary, #person.birth_date, #person.😀\ \n Aggregate: groupBy=[[#person.id, #person.first_name, #person.last_name, #person.age, #person.state, #person.salary, #person.birth_date, #person.😀]], aggr=[[]]\ \n TableScan: person projection=None", ); quick_test( "SELECT * FROM (SELECT first_name, last_name FROM person) GROUP BY first_name, last_name", "Projection: #person.first_name, #person.last_name\ \n Aggregate: groupBy=[[#person.first_name, #person.last_name]], aggr=[[]]\ \n Projection: #person.first_name, #person.last_name\ \n TableScan: person projection=None", ); } #[test] fn select_simple_aggregate() { quick_test( "SELECT MIN(age) FROM person", "Projection: #MIN(person.age)\ \n Aggregate: groupBy=[[]], aggr=[[MIN(#person.age)]]\ \n TableScan: person projection=None", ); } #[test] fn test_sum_aggregate() { quick_test( "SELECT SUM(age) from person", "Projection: #SUM(person.age)\ \n Aggregate: groupBy=[[]], aggr=[[SUM(#person.age)]]\ \n TableScan: person projection=None", ); } #[test] fn select_simple_aggregate_column_does_not_exist() { let sql = "SELECT MIN(doesnotexist) FROM person"; let err = logical_plan(sql).expect_err("query should have failed"); assert!(matches!( err, DataFusionError::Plan(msg) if msg.contains("No field with unqualified name 'doesnotexist'"), )); } #[test] fn select_simple_aggregate_repeated_aggregate() { let sql = "SELECT MIN(age), MIN(age) FROM person"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( r##"Plan("Projections require unique expression names but the expression \"MIN(#person.age)\" at position 0 and \"MIN(#person.age)\" at position 1 have the same name. Consider aliasing (\"AS\") one of them.")"##, format!("{:?}", err) ); } #[test] fn select_simple_aggregate_repeated_aggregate_with_single_alias() { quick_test( "SELECT MIN(age), MIN(age) AS a FROM person", "Projection: #MIN(person.age), #MIN(person.age) AS a\ \n Aggregate: groupBy=[[]], aggr=[[MIN(#person.age)]]\ \n TableScan: person projection=None", ); } #[test] fn select_simple_aggregate_repeated_aggregate_with_unique_aliases() { quick_test( "SELECT MIN(age) AS a, MIN(age) AS b FROM person", "Projection: #MIN(person.age) AS a, #MIN(person.age) AS b\ \n Aggregate: groupBy=[[]], aggr=[[MIN(#person.age)]]\ \n TableScan: person projection=None", ); } #[test] fn select_simple_aggregate_repeated_aggregate_with_repeated_aliases() { let sql = "SELECT MIN(age) AS a, MIN(age) AS a FROM person"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( r##"Plan("Projections require unique expression names but the expression \"MIN(#person.age) AS a\" at position 0 and \"MIN(#person.age) AS a\" at position 1 have the same name. 
Consider aliasing (\"AS\") one of them.")"##, format!("{:?}", err) ); } #[test] fn select_simple_aggregate_with_groupby() { quick_test( "SELECT state, MIN(age), MAX(age) FROM person GROUP BY state", "Projection: #person.state, #MIN(person.age), #MAX(person.age)\ \n Aggregate: groupBy=[[#person.state]], aggr=[[MIN(#person.age), MAX(#person.age)]]\ \n TableScan: person projection=None", ); } #[test] fn select_simple_aggregate_with_groupby_with_aliases() { quick_test( "SELECT state AS a, MIN(age) AS b FROM person GROUP BY state", "Projection: #person.state AS a, #MIN(person.age) AS b\ \n Aggregate: groupBy=[[#person.state]], aggr=[[MIN(#person.age)]]\ \n TableScan: person projection=None", ); } #[test] fn select_simple_aggregate_with_groupby_with_aliases_repeated() { let sql = "SELECT state AS a, MIN(age) AS a FROM person GROUP BY state"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( r##"Plan("Projections require unique expression names but the expression \"#person.state AS a\" at position 0 and \"MIN(#person.age) AS a\" at position 1 have the same name. Consider aliasing (\"AS\") one of them.")"##, format!("{:?}", err) ); } #[test] fn select_simple_aggregate_with_groupby_column_unselected() { quick_test( "SELECT MIN(age), MAX(age) FROM person GROUP BY state", "Projection: #MIN(person.age), #MAX(person.age)\ \n Aggregate: groupBy=[[#person.state]], aggr=[[MIN(#person.age), MAX(#person.age)]]\ \n TableScan: person projection=None", ); } #[test] fn select_simple_aggregate_with_groupby_and_column_in_group_by_does_not_exist() { let sql = "SELECT SUM(age) FROM person GROUP BY doesnotexist"; let err = logical_plan(sql).expect_err("query should have failed"); assert!(matches!( err, DataFusionError::Plan(msg) if msg.contains("No field with unqualified name 'doesnotexist'"), )); } #[test] fn select_simple_aggregate_with_groupby_and_column_in_aggregate_does_not_exist() { let sql = "SELECT SUM(doesnotexist) FROM person GROUP BY first_name"; let err = logical_plan(sql).expect_err("query should have failed"); assert!(matches!( err, DataFusionError::Plan(msg) if msg.contains("No field with unqualified name 'doesnotexist'"), )); } #[test] fn select_interval_out_of_range() { let sql = "SELECT INTERVAL '100000000000000000 day'"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( r#"NotImplemented("Interval field value out of range: \"100000000000000000 day\"")"#, format!("{:?}", err) ); } #[test] fn select_unsupported_complex_interval() { let sql = "SELECT INTERVAL '1 year 1 day'"; let err = logical_plan(sql).expect_err("query should have failed"); assert!(matches!( err, DataFusionError::NotImplemented(msg) if msg == "DF does not support intervals that have both a Year/Month part as well as Days/Hours/Mins/Seconds: \"1 year 1 day\". Hint: try breaking the interval into two parts, one with Year/Month and the other with Days/Hours/Mins/Seconds - e.g. 
(NOW() + INTERVAL '1 year') + INTERVAL '1 day'", )); } #[test] fn select_simple_aggregate_with_groupby_and_column_is_in_aggregate_and_groupby() { quick_test( "SELECT MAX(first_name) FROM person GROUP BY first_name", "Projection: #MAX(person.first_name)\ \n Aggregate: groupBy=[[#person.first_name]], aggr=[[MAX(#person.first_name)]]\ \n TableScan: person projection=None", ); } #[test] fn select_simple_aggregate_with_groupby_can_use_positions() { quick_test( "SELECT state, age AS b, COUNT(1) FROM person GROUP BY 1, 2", "Projection: #person.state, #person.age AS b, #COUNT(UInt8(1))\ \n Aggregate: groupBy=[[#person.state, #person.age]], aggr=[[COUNT(UInt8(1))]]\ \n TableScan: person projection=None", ); quick_test( "SELECT state, age AS b, COUNT(1) FROM person GROUP BY 2, 1", "Projection: #person.state, #person.age AS b, #COUNT(UInt8(1))\ \n Aggregate: groupBy=[[#person.age, #person.state]], aggr=[[COUNT(UInt8(1))]]\ \n TableScan: person projection=None", ); } #[test] fn select_simple_aggregate_with_groupby_position_out_of_range() { let sql = "SELECT state, MIN(age) FROM person GROUP BY 0"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( "Plan(\"Projection references non-aggregate values\")", format!("{:?}", err) ); let sql2 = "SELECT state, MIN(age) FROM person GROUP BY 5"; let err2 = logical_plan(sql2).expect_err("query should have failed"); assert_eq!( "Plan(\"Projection references non-aggregate values\")", format!("{:?}", err2) ); } #[test] fn select_simple_aggregate_with_groupby_can_use_alias() { quick_test( "SELECT state AS a, MIN(age) AS b FROM person GROUP BY a", "Projection: #person.state AS a, #MIN(person.age) AS b\ \n Aggregate: groupBy=[[#person.state]], aggr=[[MIN(#person.age)]]\ \n TableScan: person projection=None", ); } #[test] fn select_simple_aggregate_with_groupby_aggregate_repeated() { let sql = "SELECT state, MIN(age), MIN(age) FROM person GROUP BY state"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( r##"Plan("Projections require unique expression names but the expression \"MIN(#person.age)\" at position 1 and \"MIN(#person.age)\" at position 2 have the same name. 
Consider aliasing (\"AS\") one of them.")"##, format!("{:?}", err) ); } #[test] fn select_simple_aggregate_with_groupby_aggregate_repeated_and_one_has_alias() { quick_test( "SELECT state, MIN(age), MIN(age) AS ma FROM person GROUP BY state", "Projection: #person.state, #MIN(person.age), #MIN(person.age) AS ma\ \n Aggregate: groupBy=[[#person.state]], aggr=[[MIN(#person.age)]]\ \n TableScan: person projection=None", ) } #[test] fn select_simple_aggregate_with_groupby_non_column_expression_unselected() { quick_test( "SELECT MIN(first_name) FROM person GROUP BY age + 1", "Projection: #MIN(person.first_name)\ \n Aggregate: groupBy=[[#person.age Plus Int64(1)]], aggr=[[MIN(#person.first_name)]]\ \n TableScan: person projection=None", ); } #[test] fn select_simple_aggregate_with_groupby_non_column_expression_selected_and_resolvable( ) { quick_test( "SELECT age + 1, MIN(first_name) FROM person GROUP BY age + 1", "Projection: #person.age Plus Int64(1), #MIN(person.first_name)\ \n Aggregate: groupBy=[[#person.age Plus Int64(1)]], aggr=[[MIN(#person.first_name)]]\ \n TableScan: person projection=None", ); quick_test( "SELECT MIN(first_name), age + 1 FROM person GROUP BY age + 1", "Projection: #MIN(person.first_name), #person.age Plus Int64(1)\ \n Aggregate: groupBy=[[#person.age Plus Int64(1)]], aggr=[[MIN(#person.first_name)]]\ \n TableScan: person projection=None", ); } #[test] fn select_simple_aggregate_with_groupby_non_column_expression_nested_and_resolvable() { quick_test( "SELECT ((age + 1) / 2) * (age + 1), MIN(first_name) FROM person GROUP BY age + 1", "Projection: #person.age Plus Int64(1) Divide Int64(2) Multiply #person.age Plus Int64(1), #MIN(person.first_name)\ \n Aggregate: groupBy=[[#person.age Plus Int64(1)]], aggr=[[MIN(#person.first_name)]]\ \n TableScan: person projection=None", ); } #[test] fn select_simple_aggregate_with_groupby_non_column_expression_nested_and_not_resolvable( ) { // The query should fail, because age + 9 is not in the group by. 
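        // (`age + 1` can be rebased onto the identical GROUP BY expression,
        // but `age + 9` has no matching group expression or aggregate, so the
        // bare `age` column survives rebasing and can_columns_satisfy_exprs
        // rejects it.)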
let sql = "SELECT ((age + 1) / 2) * (age + 9), MIN(first_name) FROM person GROUP BY age + 1"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( r#"Plan("Projection references non-aggregate values")"#, format!("{:?}", err) ); } #[test] fn select_simple_aggregate_with_groupby_non_column_expression_and_its_column_selected( ) { let sql = "SELECT age, MIN(first_name) FROM person GROUP BY age + 1"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( r#"Plan("Projection references non-aggregate values")"#, format!("{:?}", err) ); } #[test] fn select_simple_aggregate_nested_in_binary_expr_with_groupby() { quick_test( "SELECT state, MIN(age) < 10 FROM person GROUP BY state", "Projection: #person.state, #MIN(person.age) Lt Int64(10)\ \n Aggregate: groupBy=[[#person.state]], aggr=[[MIN(#person.age)]]\ \n TableScan: person projection=None", ); } #[test] fn select_simple_aggregate_and_nested_groupby_column() { quick_test( "SELECT age + 1, MAX(first_name) FROM person GROUP BY age", "Projection: #person.age Plus Int64(1), #MAX(person.first_name)\ \n Aggregate: groupBy=[[#person.age]], aggr=[[MAX(#person.first_name)]]\ \n TableScan: person projection=None", ); } #[test] fn select_aggregate_compounded_with_groupby_column() { quick_test( "SELECT age + MIN(salary) FROM person GROUP BY age", "Projection: #person.age Plus #MIN(person.salary)\ \n Aggregate: groupBy=[[#person.age]], aggr=[[MIN(#person.salary)]]\ \n TableScan: person projection=None", ); } #[test] fn select_aggregate_with_non_column_inner_expression_with_groupby() { quick_test( "SELECT state, MIN(age + 1) FROM person GROUP BY state", "Projection: #person.state, #MIN(person.age Plus Int64(1))\ \n Aggregate: groupBy=[[#person.state]], aggr=[[MIN(#person.age Plus Int64(1))]]\ \n TableScan: person projection=None", ); } #[test] fn test_wildcard() { quick_test( "SELECT * from person", "Projection: #person.id, #person.first_name, #person.last_name, #person.age, #person.state, #person.salary, #person.birth_date, #person.😀\ \n TableScan: person projection=None", ); } #[test] fn select_count_one() { let sql = "SELECT COUNT(1) FROM person"; let expected = "Projection: #COUNT(UInt8(1))\ \n Aggregate: groupBy=[[]], aggr=[[COUNT(UInt8(1))]]\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_count_column() { let sql = "SELECT COUNT(id) FROM person"; let expected = "Projection: #COUNT(person.id)\ \n Aggregate: groupBy=[[]], aggr=[[COUNT(#person.id)]]\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_scalar_func() { let sql = "SELECT sqrt(age) FROM person"; let expected = "Projection: sqrt(#person.age)\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_aliased_scalar_func() { let sql = "SELECT sqrt(person.age) AS square_people FROM person"; let expected = "Projection: sqrt(#person.age) AS square_people\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_where_nullif_division() { let sql = "SELECT c3/(c4+c5) \ FROM aggregate_test_100 WHERE c3/nullif(c4+c5, 0) > 0.1"; let expected = "Projection: #aggregate_test_100.c3 Divide #aggregate_test_100.c4 Plus #aggregate_test_100.c5\ \n Filter: #aggregate_test_100.c3 Divide nullif(#aggregate_test_100.c4 Plus #aggregate_test_100.c5, Int64(0)) Gt Float64(0.1)\ \n TableScan: aggregate_test_100 projection=None"; quick_test(sql, expected); } #[test] fn select_where_with_negative_operator() { let sql = "SELECT c3 FROM 
aggregate_test_100 WHERE c3 > -0.1 AND -c4 > 0"; let expected = "Projection: #aggregate_test_100.c3\ \n Filter: #aggregate_test_100.c3 Gt Float64(-0.1) And (- #aggregate_test_100.c4) Gt Int64(0)\ \n TableScan: aggregate_test_100 projection=None"; quick_test(sql, expected); } #[test] fn select_where_with_positive_operator() { let sql = "SELECT c3 FROM aggregate_test_100 WHERE c3 > +0.1 AND +c4 > 0"; let expected = "Projection: #aggregate_test_100.c3\ \n Filter: #aggregate_test_100.c3 Gt Float64(0.1) And #aggregate_test_100.c4 Gt Int64(0)\ \n TableScan: aggregate_test_100 projection=None"; quick_test(sql, expected); } #[test] fn select_order_by() { let sql = "SELECT id FROM person ORDER BY id"; let expected = "Sort: #person.id ASC NULLS FIRST\ \n Projection: #person.id\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_order_by_desc() { let sql = "SELECT id FROM person ORDER BY id DESC"; let expected = "Sort: #person.id DESC NULLS FIRST\ \n Projection: #person.id\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_order_by_nulls_last() { quick_test( "SELECT id FROM person ORDER BY id DESC NULLS LAST", "Sort: #person.id DESC NULLS LAST\ \n Projection: #person.id\ \n TableScan: person projection=None", ); quick_test( "SELECT id FROM person ORDER BY id NULLS LAST", "Sort: #person.id ASC NULLS LAST\ \n Projection: #person.id\ \n TableScan: person projection=None", ); } #[test] fn select_group_by() { let sql = "SELECT state FROM person GROUP BY state"; let expected = "Projection: #person.state\ \n Aggregate: groupBy=[[#person.state]], aggr=[[]]\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_group_by_columns_not_in_select() { let sql = "SELECT MAX(age) FROM person GROUP BY state"; let expected = "Projection: #MAX(person.age)\ \n Aggregate: groupBy=[[#person.state]], aggr=[[MAX(#person.age)]]\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_group_by_count_star() { let sql = "SELECT state, COUNT(*) FROM person GROUP BY state"; let expected = "Projection: #person.state, #COUNT(UInt8(1))\ \n Aggregate: groupBy=[[#person.state]], aggr=[[COUNT(UInt8(1))]]\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_group_by_needs_projection() { let sql = "SELECT COUNT(state), state FROM person GROUP BY state"; let expected = "\ Projection: #COUNT(person.state), #person.state\ \n Aggregate: groupBy=[[#person.state]], aggr=[[COUNT(#person.state)]]\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_7480_1() { let sql = "SELECT c1, MIN(c12) FROM aggregate_test_100 GROUP BY c1, c13"; let expected = "Projection: #aggregate_test_100.c1, #MIN(aggregate_test_100.c12)\ \n Aggregate: groupBy=[[#aggregate_test_100.c1, #aggregate_test_100.c13]], aggr=[[MIN(#aggregate_test_100.c12)]]\ \n TableScan: aggregate_test_100 projection=None"; quick_test(sql, expected); } #[test] fn select_7480_2() { let sql = "SELECT c1, c13, MIN(c12) FROM aggregate_test_100 GROUP BY c1"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( "Plan(\"Projection references non-aggregate values\")", format!("{:?}", err) ); } #[test] fn create_external_table_csv() { let sql = "CREATE EXTERNAL TABLE t(c1 int) STORED AS CSV LOCATION 'foo.csv'"; let expected = "CreateExternalTable: \"t\""; quick_test(sql, expected); } #[test] fn create_external_table_csv_no_schema() { let sql = "CREATE EXTERNAL TABLE t STORED AS CSV 
LOCATION 'foo.csv'"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( "Plan(\"Column definitions required for CSV files. None found\")", format!("{:?}", err) ); } #[test] fn create_external_table_parquet() { let sql = "CREATE EXTERNAL TABLE t(c1 int) STORED AS PARQUET LOCATION 'foo.parquet'"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( "Plan(\"Column definitions can not be specified for PARQUET files.\")", format!("{:?}", err) ); } #[test] fn create_external_table_parquet_no_schema() { let sql = "CREATE EXTERNAL TABLE t STORED AS PARQUET LOCATION 'foo.parquet'"; let expected = "CreateExternalTable: \"t\""; quick_test(sql, expected); } #[test] fn equijoin_explicit_syntax() { let sql = "SELECT id, order_id \ FROM person \ JOIN orders \ ON id = customer_id"; let expected = "Projection: #person.id, #orders.order_id\ \n Join: #person.id = #orders.customer_id\ \n TableScan: person projection=None\ \n TableScan: orders projection=None"; quick_test(sql, expected); } #[test] fn join_with_table_name() { let sql = "SELECT id, order_id \ FROM person \ JOIN orders \ ON person.id = orders.customer_id"; let expected = "Projection: #person.id, #orders.order_id\ \n Join: #person.id = #orders.customer_id\ \n TableScan: person projection=None\ \n TableScan: orders projection=None"; quick_test(sql, expected); } #[test] fn join_with_using() { let sql = "SELECT person.first_name, id \ FROM person \ JOIN person as person2 \ USING (id)"; let expected = "Projection: #person.first_name, #person.id\ \n Join: #person.id = #person2.id\ \n TableScan: person projection=None\ \n TableScan: person2 projection=None"; quick_test(sql, expected); } #[test] fn equijoin_explicit_syntax_3_tables() { let sql = "SELECT id, order_id, l_description \ FROM person \ JOIN orders ON id = customer_id \ JOIN lineitem ON o_item_id = l_item_id"; let expected = "Projection: #person.id, #orders.order_id, #lineitem.l_description\ \n Join: #orders.o_item_id = #lineitem.l_item_id\ \n Join: #person.id = #orders.customer_id\ \n TableScan: person projection=None\ \n TableScan: orders projection=None\ \n TableScan: lineitem projection=None"; quick_test(sql, expected); } #[test] fn boolean_literal_in_condition_expression() { let sql = "SELECT order_id \ FROM orders \ WHERE delivered = false OR delivered = true"; let expected = "Projection: #orders.order_id\ \n Filter: #orders.delivered Eq Boolean(false) Or #orders.delivered Eq Boolean(true)\ \n TableScan: orders projection=None"; quick_test(sql, expected); } #[test] fn union() { let sql = "SELECT order_id from orders UNION ALL SELECT order_id FROM orders"; let expected = "Union\ \n Projection: #orders.order_id\ \n TableScan: orders projection=None\ \n Projection: #orders.order_id\ \n TableScan: orders projection=None"; quick_test(sql, expected); } #[test] fn union_4_combined_in_one() { let sql = "SELECT order_id from orders UNION ALL SELECT order_id FROM orders UNION ALL SELECT order_id FROM orders UNION ALL SELECT order_id FROM orders"; let expected = "Union\ \n Projection: #orders.order_id\ \n TableScan: orders projection=None\ \n Projection: #orders.order_id\ \n TableScan: orders projection=None\ \n Projection: #orders.order_id\ \n TableScan: orders projection=None\ \n Projection: #orders.order_id\ \n TableScan: orders projection=None"; quick_test(sql, expected); } #[test] fn union_schemas_should_be_same() { let sql = "SELECT order_id from orders UNION ALL SELECT customer_id FROM orders"; let err = 
logical_plan(sql).expect_err("query should have failed"); assert_eq!( "Plan(\"UNION ALL schemas are expected to be the same\")", format!("{:?}", err) ); } #[test] fn empty_over() { let sql = "SELECT order_id, MAX(order_id) OVER () from orders"; let expected = "\ Projection: #orders.order_id, #MAX(orders.order_id)\ \n WindowAggr: windowExpr=[[MAX(#orders.order_id)]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } #[test] fn empty_over_with_alias() { let sql = "SELECT order_id oid, MAX(order_id) OVER () max_oid from orders"; let expected = "\ Projection: #orders.order_id AS oid, #MAX(orders.order_id) AS max_oid\ \n WindowAggr: windowExpr=[[MAX(#orders.order_id)]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } #[test] fn empty_over_dup_with_alias() { let sql = "SELECT order_id oid, MAX(order_id) OVER () max_oid, MAX(order_id) OVER () max_oid_dup from orders"; let expected = "\ Projection: #orders.order_id AS oid, #MAX(orders.order_id) AS max_oid, #MAX(orders.order_id) AS max_oid_dup\ \n WindowAggr: windowExpr=[[MAX(#orders.order_id)]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } #[test] fn empty_over_dup_with_different_sort() { let sql = "SELECT order_id oid, MAX(order_id) OVER (), MAX(order_id) OVER (ORDER BY order_id) from orders"; let expected = "\ Projection: #orders.order_id AS oid, #MAX(orders.order_id), #MAX(orders.order_id) ORDER BY [#orders.order_id ASC NULLS FIRST]\ \n WindowAggr: windowExpr=[[MAX(#orders.order_id)]]\ \n WindowAggr: windowExpr=[[MAX(#orders.order_id) ORDER BY [#orders.order_id ASC NULLS FIRST]]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } #[test] fn empty_over_plus() { let sql = "SELECT order_id, MAX(qty * 1.1) OVER () from orders"; let expected = "\ Projection: #orders.order_id, #MAX(orders.qty Multiply Float64(1.1))\ \n WindowAggr: windowExpr=[[MAX(#orders.qty Multiply Float64(1.1))]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } #[test] fn empty_over_multiple() { let sql = "SELECT order_id, MAX(qty) OVER (), min(qty) over (), aVg(qty) OVER () from orders"; let expected = "\ Projection: #orders.order_id, #MAX(orders.qty), #MIN(orders.qty), #AVG(orders.qty)\ \n WindowAggr: windowExpr=[[MAX(#orders.qty), MIN(#orders.qty), AVG(#orders.qty)]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } /// psql result /// ``` /// QUERY PLAN /// ---------------------------------------------------------------------- /// WindowAgg (cost=69.83..87.33 rows=1000 width=8) /// -> Sort (cost=69.83..72.33 rows=1000 width=8) /// Sort Key: order_id /// -> Seq Scan on orders (cost=0.00..20.00 rows=1000 width=8) /// ``` #[test] fn over_partition_by() { let sql = "SELECT order_id, MAX(qty) OVER (PARTITION BY order_id) from orders"; let expected = "\ Projection: #orders.order_id, #MAX(orders.qty) PARTITION BY [#orders.order_id]\ \n WindowAggr: windowExpr=[[MAX(#orders.qty) PARTITION BY [#orders.order_id]]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } /// psql result /// ``` /// QUERY PLAN /// ---------------------------------------------------------------------------------- /// WindowAgg (cost=137.16..154.66 rows=1000 width=12) /// -> Sort (cost=137.16..139.66 rows=1000 width=12) /// Sort Key: order_id /// -> WindowAgg (cost=69.83..87.33 rows=1000 width=12) /// -> Sort (cost=69.83..72.33 rows=1000 width=8) /// Sort Key: order_id DESC /// -> Seq Scan on orders (cost=0.00..20.00 rows=1000 width=8) /// ``` #[test] fn over_order_by() { let sql 
= "SELECT order_id, MAX(qty) OVER (ORDER BY order_id), MIN(qty) OVER (ORDER BY order_id DESC) from orders"; let expected = "\ Projection: #orders.order_id, #MAX(orders.qty) ORDER BY [#orders.order_id ASC NULLS FIRST], #MIN(orders.qty) ORDER BY [#orders.order_id DESC NULLS FIRST]\ \n WindowAggr: windowExpr=[[MAX(#orders.qty) ORDER BY [#orders.order_id ASC NULLS FIRST]]]\ \n WindowAggr: windowExpr=[[MIN(#orders.qty) ORDER BY [#orders.order_id DESC NULLS FIRST]]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } #[test] fn over_order_by_with_window_frame_double_end() { let sql = "SELECT order_id, MAX(qty) OVER (ORDER BY order_id ROWS BETWEEN 3 PRECEDING and 3 FOLLOWING), MIN(qty) OVER (ORDER BY order_id DESC) from orders"; let expected = "\ Projection: #orders.order_id, #MAX(orders.qty) ORDER BY [#orders.order_id ASC NULLS FIRST] ROWS BETWEEN 3 PRECEDING AND 3 FOLLOWING, #MIN(orders.qty) ORDER BY [#orders.order_id DESC NULLS FIRST]\ \n WindowAggr: windowExpr=[[MAX(#orders.qty) ORDER BY [#orders.order_id ASC NULLS FIRST] ROWS BETWEEN 3 PRECEDING AND 3 FOLLOWING]]\ \n WindowAggr: windowExpr=[[MIN(#orders.qty) ORDER BY [#orders.order_id DESC NULLS FIRST]]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } #[test] fn over_order_by_with_window_frame_single_end() { let sql = "SELECT order_id, MAX(qty) OVER (ORDER BY order_id ROWS 3 PRECEDING), MIN(qty) OVER (ORDER BY order_id DESC) from orders"; let expected = "\ Projection: #orders.order_id, #MAX(orders.qty) ORDER BY [#orders.order_id ASC NULLS FIRST] ROWS BETWEEN 3 PRECEDING AND CURRENT ROW, #MIN(orders.qty) ORDER BY [#orders.order_id DESC NULLS FIRST]\ \n WindowAggr: windowExpr=[[MAX(#orders.qty) ORDER BY [#orders.order_id ASC NULLS FIRST] ROWS BETWEEN 3 PRECEDING AND CURRENT ROW]]\ \n WindowAggr: windowExpr=[[MIN(#orders.qty) ORDER BY [#orders.order_id DESC NULLS FIRST]]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } #[test] fn over_order_by_with_window_frame_range_value_check() { let sql = "SELECT order_id, MAX(qty) OVER (ORDER BY order_id RANGE 3 PRECEDING) from orders"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( "NotImplemented(\"With WindowFrameUnits=RANGE, the bound cannot be 3 PRECEDING or FOLLOWING at the moment\")", format!("{:?}", err) ); } #[test] fn over_order_by_with_window_frame_range_order_by_check() { let sql = "SELECT order_id, MAX(qty) OVER (RANGE UNBOUNDED PRECEDING) from orders"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( "Plan(\"With window frame of type RANGE, the order by expression must be of length 1, got 0\")", format!("{:?}", err) ); } #[test] fn over_order_by_with_window_frame_range_order_by_check_2() { let sql = "SELECT order_id, MAX(qty) OVER (ORDER BY order_id, qty RANGE UNBOUNDED PRECEDING) from orders"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( "Plan(\"With window frame of type RANGE, the order by expression must be of length 1, got 2\")", format!("{:?}", err) ); } #[test] fn over_order_by_with_window_frame_single_end_groups() { let sql = "SELECT order_id, MAX(qty) OVER (ORDER BY order_id GROUPS 3 PRECEDING), MIN(qty) OVER (ORDER BY order_id DESC) from orders"; let expected = "\ Projection: #orders.order_id, #MAX(orders.qty) ORDER BY [#orders.order_id ASC NULLS FIRST] GROUPS BETWEEN 3 PRECEDING AND CURRENT ROW, #MIN(orders.qty) ORDER BY [#orders.order_id DESC NULLS FIRST]\ \n WindowAggr: windowExpr=[[MAX(#orders.qty) ORDER BY [#orders.order_id 
ASC NULLS FIRST] GROUPS BETWEEN 3 PRECEDING AND CURRENT ROW]]\ \n WindowAggr: windowExpr=[[MIN(#orders.qty) ORDER BY [#orders.order_id DESC NULLS FIRST]]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } /// psql result /// ``` /// QUERY PLAN /// ----------------------------------------------------------------------------------- /// WindowAgg (cost=142.16..162.16 rows=1000 width=16) /// -> Sort (cost=142.16..144.66 rows=1000 width=16) /// Sort Key: order_id /// -> WindowAgg (cost=72.33..92.33 rows=1000 width=16) /// -> Sort (cost=72.33..74.83 rows=1000 width=12) /// Sort Key: ((order_id + 1)) /// -> Seq Scan on orders (cost=0.00..22.50 rows=1000 width=12) /// ``` #[test] fn over_order_by_two_sort_keys() { let sql = "SELECT order_id, MAX(qty) OVER (ORDER BY order_id), MIN(qty) OVER (ORDER BY (order_id + 1)) from orders"; let expected = "\ Projection: #orders.order_id, #MAX(orders.qty) ORDER BY [#orders.order_id ASC NULLS FIRST], #MIN(orders.qty) ORDER BY [#orders.order_id Plus Int64(1) ASC NULLS FIRST]\ \n WindowAggr: windowExpr=[[MAX(#orders.qty) ORDER BY [#orders.order_id ASC NULLS FIRST]]]\ \n WindowAggr: windowExpr=[[MIN(#orders.qty) ORDER BY [#orders.order_id Plus Int64(1) ASC NULLS FIRST]]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } /// psql result /// ``` /// QUERY PLAN /// ---------------------------------------------------------------------------------------- /// WindowAgg (cost=139.66..172.16 rows=1000 width=24) /// -> WindowAgg (cost=139.66..159.66 rows=1000 width=16) /// -> Sort (cost=139.66..142.16 rows=1000 width=12) /// Sort Key: qty, order_id /// -> WindowAgg (cost=69.83..89.83 rows=1000 width=12) /// -> Sort (cost=69.83..72.33 rows=1000 width=8) /// Sort Key: order_id, qty /// -> Seq Scan on orders (cost=0.00..20.00 rows=1000 width=8) /// ``` #[test] fn over_order_by_sort_keys_sorting() { let sql = "SELECT order_id, MAX(qty) OVER (ORDER BY qty, order_id), SUM(qty) OVER (), MIN(qty) OVER (ORDER BY order_id, qty) from orders"; let expected = "\ Projection: #orders.order_id, #MAX(orders.qty) ORDER BY [#orders.qty ASC NULLS FIRST, #orders.order_id ASC NULLS FIRST], #SUM(orders.qty), #MIN(orders.qty) ORDER BY [#orders.order_id ASC NULLS FIRST, #orders.qty ASC NULLS FIRST]\ \n WindowAggr: windowExpr=[[SUM(#orders.qty)]]\ \n WindowAggr: windowExpr=[[MAX(#orders.qty) ORDER BY [#orders.qty ASC NULLS FIRST, #orders.order_id ASC NULLS FIRST]]]\ \n WindowAggr: windowExpr=[[MIN(#orders.qty) ORDER BY [#orders.order_id ASC NULLS FIRST, #orders.qty ASC NULLS FIRST]]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } /// psql result /// ``` /// QUERY PLAN /// ---------------------------------------------------------------------------------- /// WindowAgg (cost=69.83..117.33 rows=1000 width=24) /// -> WindowAgg (cost=69.83..104.83 rows=1000 width=16) /// -> WindowAgg (cost=69.83..89.83 rows=1000 width=12) /// -> Sort (cost=69.83..72.33 rows=1000 width=8) /// Sort Key: order_id, qty /// -> Seq Scan on orders (cost=0.00..20.00 rows=1000 width=8) /// ``` /// /// FIXME: for now we are not detecting prefix of sorting keys in order to save one sort exec phase #[test] fn over_order_by_sort_keys_sorting_prefix_compacting() { let sql = "SELECT order_id, MAX(qty) OVER (ORDER BY order_id), SUM(qty) OVER (), MIN(qty) OVER (ORDER BY order_id, qty) from orders"; let expected = "\ Projection: #orders.order_id, #MAX(orders.qty) ORDER BY [#orders.order_id ASC NULLS FIRST], #SUM(orders.qty), #MIN(orders.qty) ORDER BY [#orders.order_id ASC NULLS 
FIRST, #orders.qty ASC NULLS FIRST]\ \n WindowAggr: windowExpr=[[SUM(#orders.qty)]]\ \n WindowAggr: windowExpr=[[MAX(#orders.qty) ORDER BY [#orders.order_id ASC NULLS FIRST]]]\ \n WindowAggr: windowExpr=[[MIN(#orders.qty) ORDER BY [#orders.order_id ASC NULLS FIRST, #orders.qty ASC NULLS FIRST]]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } /// psql result /// ``` /// QUERY PLAN /// ---------------------------------------------------------------------------------------- /// WindowAgg (cost=139.66..172.16 rows=1000 width=24) /// -> WindowAgg (cost=139.66..159.66 rows=1000 width=16) /// -> Sort (cost=139.66..142.16 rows=1000 width=12) /// Sort Key: order_id, qty /// -> WindowAgg (cost=69.83..89.83 rows=1000 width=12) /// -> Sort (cost=69.83..72.33 rows=1000 width=8) /// Sort Key: qty, order_id /// -> Seq Scan on orders (cost=0.00..20.00 rows=1000 width=8) /// ``` /// /// FIXME: for now we are not detecting prefix of sorting keys in order to re-arrange with global /// sort #[test] fn over_order_by_sort_keys_sorting_global_order_compacting() { let sql = "SELECT order_id, MAX(qty) OVER (ORDER BY qty, order_id), SUM(qty) OVER (), MIN(qty) OVER (ORDER BY order_id, qty) from orders ORDER BY order_id"; let expected = "\ Sort: #orders.order_id ASC NULLS FIRST\ \n Projection: #orders.order_id, #MAX(orders.qty) ORDER BY [#orders.qty ASC NULLS FIRST, #orders.order_id ASC NULLS FIRST], #SUM(orders.qty), #MIN(orders.qty) ORDER BY [#orders.order_id ASC NULLS FIRST, #orders.qty ASC NULLS FIRST]\ \n WindowAggr: windowExpr=[[SUM(#orders.qty)]]\ \n WindowAggr: windowExpr=[[MAX(#orders.qty) ORDER BY [#orders.qty ASC NULLS FIRST, #orders.order_id ASC NULLS FIRST]]]\ \n WindowAggr: windowExpr=[[MIN(#orders.qty) ORDER BY [#orders.order_id ASC NULLS FIRST, #orders.qty ASC NULLS FIRST]]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } /// psql result /// ``` /// QUERY PLAN /// ---------------------------------------------------------------------- /// WindowAgg (cost=69.83..89.83 rows=1000 width=12) /// -> Sort (cost=69.83..72.33 rows=1000 width=8) /// Sort Key: order_id, qty /// -> Seq Scan on orders (cost=0.00..20.00 rows=1000 width=8) /// ``` #[test] fn over_partition_by_order_by() { let sql = "SELECT order_id, MAX(qty) OVER (PARTITION BY order_id ORDER BY qty) from orders"; let expected = "\ Projection: #orders.order_id, #MAX(orders.qty) PARTITION BY [#orders.order_id] ORDER BY [#orders.qty ASC NULLS FIRST]\ \n WindowAggr: windowExpr=[[MAX(#orders.qty) PARTITION BY [#orders.order_id] ORDER BY [#orders.qty ASC NULLS FIRST]]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } /// psql result /// ``` /// QUERY PLAN /// ---------------------------------------------------------------------- /// WindowAgg (cost=69.83..89.83 rows=1000 width=12) /// -> Sort (cost=69.83..72.33 rows=1000 width=8) /// Sort Key: order_id, qty /// -> Seq Scan on orders (cost=0.00..20.00 rows=1000 width=8) /// ``` #[test] fn over_partition_by_order_by_no_dup() { let sql = "SELECT order_id, MAX(qty) OVER (PARTITION BY order_id, qty ORDER BY qty) from orders"; let expected = "\ Projection: #orders.order_id, #MAX(orders.qty) PARTITION BY [#orders.order_id, #orders.qty] ORDER BY [#orders.qty ASC NULLS FIRST]\ \n WindowAggr: windowExpr=[[MAX(#orders.qty) PARTITION BY [#orders.order_id, #orders.qty] ORDER BY [#orders.qty ASC NULLS FIRST]]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } /// psql result /// ``` /// QUERY PLAN /// 
---------------------------------------------------------------------------------- /// WindowAgg (cost=142.16..162.16 rows=1000 width=16) /// -> Sort (cost=142.16..144.66 rows=1000 width=12) /// Sort Key: qty, order_id /// -> WindowAgg (cost=69.83..92.33 rows=1000 width=12) /// -> Sort (cost=69.83..72.33 rows=1000 width=8) /// Sort Key: order_id, qty /// -> Seq Scan on orders (cost=0.00..20.00 rows=1000 width=8) /// ``` #[test] fn over_partition_by_order_by_mix_up() { let sql = "SELECT order_id, MAX(qty) OVER (PARTITION BY order_id, qty ORDER BY qty), MIN(qty) OVER (PARTITION BY qty ORDER BY order_id) from orders"; let expected = "\ Projection: #orders.order_id, #MAX(orders.qty) PARTITION BY [#orders.order_id, #orders.qty] ORDER BY [#orders.qty ASC NULLS FIRST], #MIN(orders.qty) PARTITION BY [#orders.qty] ORDER BY [#orders.order_id ASC NULLS FIRST]\ \n WindowAggr: windowExpr=[[MAX(#orders.qty) PARTITION BY [#orders.order_id, #orders.qty] ORDER BY [#orders.qty ASC NULLS FIRST]]]\ \n WindowAggr: windowExpr=[[MIN(#orders.qty) PARTITION BY [#orders.qty] ORDER BY [#orders.order_id ASC NULLS FIRST]]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } /// psql result /// ``` /// QUERY PLAN /// ----------------------------------------------------------------------------- /// WindowAgg (cost=69.83..109.83 rows=1000 width=24) /// -> WindowAgg (cost=69.83..92.33 rows=1000 width=20) /// -> Sort (cost=69.83..72.33 rows=1000 width=16) /// Sort Key: order_id, qty, price /// -> Seq Scan on orders (cost=0.00..20.00 rows=1000 width=16) /// ``` /// FIXME: for now we are not detecting prefix of sorting keys in order to save one sort exec phase #[test] fn over_partition_by_order_by_mix_up_prefix() { let sql = "SELECT order_id, MAX(qty) OVER (PARTITION BY order_id ORDER BY qty), MIN(qty) OVER (PARTITION BY order_id, qty ORDER BY price) from orders"; let expected = "\ Projection: #orders.order_id, #MAX(orders.qty) PARTITION BY [#orders.order_id] ORDER BY [#orders.qty ASC NULLS FIRST], #MIN(orders.qty) PARTITION BY [#orders.order_id, #orders.qty] ORDER BY [#orders.price ASC NULLS FIRST]\ \n WindowAggr: windowExpr=[[MAX(#orders.qty) PARTITION BY [#orders.order_id] ORDER BY [#orders.qty ASC NULLS FIRST]]]\ \n WindowAggr: windowExpr=[[MIN(#orders.qty) PARTITION BY [#orders.order_id, #orders.qty] ORDER BY [#orders.price ASC NULLS FIRST]]]\ \n TableScan: orders projection=None"; quick_test(sql, expected); } #[test] fn only_union_all_supported() { let sql = "SELECT order_id from orders EXCEPT SELECT order_id FROM orders"; let err = logical_plan(sql).expect_err("query should have failed"); assert_eq!( "NotImplemented(\"Only UNION ALL is supported, found EXCEPT\")", format!("{:?}", err) ); } #[test] fn select_typedstring() { let sql = "SELECT date '2020-12-10' AS date FROM person"; let expected = "Projection: CAST(Utf8(\"2020-12-10\") AS Date32) AS date\ \n TableScan: person projection=None"; quick_test(sql, expected); } #[test] fn select_multibyte_column() { let sql = r#"SELECT "😀" FROM person"#; let expected = "Projection: #person.😀\ \n TableScan: person projection=None"; quick_test(sql, expected); } fn logical_plan(sql: &str) -> Result<LogicalPlan> { let planner = SqlToRel::new(&MockContextProvider {}); let result = DFParser::parse_sql(sql); let ast = result.unwrap(); planner.statement_to_plan(&ast[0]) } /// Create logical plan, write with formatter, compare to expected output fn quick_test(sql: &str, expected: &str) { let plan = logical_plan(sql).unwrap(); assert_eq!(format!("{:?}", plan), 
expected); } struct MockContextProvider {} impl ContextProvider for MockContextProvider { fn get_table_provider( &self, name: TableReference, ) -> Option<Arc<dyn TableProvider>> { let schema = match name.table() { "person" => Some(Schema::new(vec![ Field::new("id", DataType::UInt32, false), Field::new("first_name", DataType::Utf8, false), Field::new("last_name", DataType::Utf8, false), Field::new("age", DataType::Int32, false), Field::new("state", DataType::Utf8, false), Field::new("salary", DataType::Float64, false), Field::new( "birth_date", DataType::Timestamp(TimeUnit::Nanosecond, None), false, ), Field::new("😀", DataType::Int32, false), ])), "orders" => Some(Schema::new(vec![ Field::new("order_id", DataType::UInt32, false), Field::new("customer_id", DataType::UInt32, false), Field::new("o_item_id", DataType::Utf8, false), Field::new("qty", DataType::Int32, false), Field::new("price", DataType::Float64, false), Field::new("delivered", DataType::Boolean, false), ])), "lineitem" => Some(Schema::new(vec![ Field::new("l_item_id", DataType::UInt32, false), Field::new("l_description", DataType::Utf8, false), Field::new("price", DataType::Float64, false), ])), "aggregate_test_100" => Some(Schema::new(vec![ Field::new("c1", DataType::Utf8, false), Field::new("c2", DataType::UInt32, false), Field::new("c3", DataType::Int8, false), Field::new("c4", DataType::Int16, false), Field::new("c5", DataType::Int32, false), Field::new("c6", DataType::Int64, false), Field::new("c7", DataType::UInt8, false), Field::new("c8", DataType::UInt16, false), Field::new("c9", DataType::UInt32, false), Field::new("c10", DataType::UInt64, false), Field::new("c11", DataType::Float32, false), Field::new("c12", DataType::Float64, false), Field::new("c13", DataType::Utf8, false), ])), _ => None, }; schema.map(|s| -> Arc<dyn TableProvider> { Arc::new(EmptyTable::new(Arc::new(s))) }) } fn get_function_meta(&self, name: &str) -> Option<Arc<ScalarUDF>> { let f: ScalarFunctionImplementation = Arc::new(|_| Err(DataFusionError::NotImplemented("".to_string()))); match name { "my_sqrt" => Some(Arc::new(create_udf( "my_sqrt", vec![DataType::Float64], Arc::new(DataType::Float64), f, ))), _ => None, } } fn get_aggregate_meta(&self, _name: &str) -> Option<Arc<AggregateUDF>> { unimplemented!() } } }
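// --- Editor's note: illustrative sketch, not part of the archived test file. ---
// Every test above follows the same "golden string" pattern: plan a SQL string,
// then compare the plan's `Debug` output against a hand-written expected string
// whose `\n  ` continuations encode two spaces of indentation per child plan.
// The toy types below are hypothetical stand-ins (not DataFusion's), kept only
// to show why the expected strings are written that way.
#[cfg(test)]
mod golden_string_pattern_sketch {
    use std::fmt;

    enum ToyPlan {
        Projection { exprs: Vec<String>, input: Box<ToyPlan> },
        TableScan { table: String },
    }

    impl fmt::Debug for ToyPlan {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            // Print the plan top-down, two spaces deeper per level, mirroring
            // the layout the assertions above rely on.
            fn fmt_plan(p: &ToyPlan, f: &mut fmt::Formatter<'_>, depth: usize) -> fmt::Result {
                let pad = "  ".repeat(depth);
                match p {
                    ToyPlan::Projection { exprs, input } => {
                        writeln!(f, "{}Projection: {}", pad, exprs.join(", "))?;
                        fmt_plan(input, f, depth + 1)
                    }
                    ToyPlan::TableScan { table } => {
                        write!(f, "{}TableScan: {} projection=None", pad, table)
                    }
                }
            }
            fmt_plan(self, f, 0)
        }
    }

    #[test]
    fn golden_string_comparison() {
        let plan = ToyPlan::Projection {
            exprs: vec!["#person.id".to_string()],
            input: Box::new(ToyPlan::TableScan { table: "person".to_string() }),
        };
        let expected = "Projection: #person.id\
                        \n  TableScan: person projection=None";
        assert_eq!(format!("{:?}", plan), expected);
    }
}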
41.244954
343
0.530729
015d84f735b7cfdfefe09e1188d77a62aa399a7b
696
use crate::lib::env::Env; use crate::lib::error::NnsCliResult; use clap::Clap; mod get_proposal_info; mod submit_proposal; /// Call the governance canister #[derive(Clap)] #[clap(name("governance"))] pub struct GovernanceOpts { #[clap(subcommand)] subcmd: SubCommand, } #[derive(Clap)] enum SubCommand { GetProposalInfo(get_proposal_info::GetProposalInfoOpts), SubmitProposal(submit_proposal::SubmitProposalOpts), } pub async fn exec(opts: GovernanceOpts, env: Env) -> NnsCliResult { match opts.subcmd { SubCommand::GetProposalInfo(v) => get_proposal_info::exec(v, env).await, SubCommand::SubmitProposal(v) => submit_proposal::exec(v, env).await, } }
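// --- Editor's note: illustrative sketch, not part of the archived file. ---
// The module above is a thin dispatcher: one enum variant per subcommand and
// one `match` arm forwarding to that subcommand's `exec`. The stand-alone
// sketch below reproduces that shape without the `clap` derive or the real
// `Env`; all names here are hypothetical.
#[cfg(test)]
mod dispatch_shape_sketch {
    pub struct Env; // stand-in for the real execution environment

    pub enum SubCommand {
        GetProposalInfo(u64),   // proposal id to query
        SubmitProposal(String), // proposal payload to submit
    }

    fn get_proposal_info(id: u64, _env: &Env) -> Result<String, String> {
        Ok(format!("proposal {}", id))
    }

    fn submit_proposal(payload: String, _env: &Env) -> Result<String, String> {
        Ok(format!("submitted: {}", payload))
    }

    // Mirrors `exec` above: destructure the parsed options once, then forward.
    pub fn exec(cmd: SubCommand, env: Env) -> Result<String, String> {
        match cmd {
            SubCommand::GetProposalInfo(id) => get_proposal_info(id, &env),
            SubCommand::SubmitProposal(p) => submit_proposal(p, &env),
        }
    }

    #[test]
    fn dispatch_routes_to_the_right_handler() {
        assert_eq!(exec(SubCommand::GetProposalInfo(7), Env).unwrap(), "proposal 7");
    }
}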
24
80
0.712644
18bf66ceb3501fc0d0ff14dbf49380f5a75d5535
8,724
use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::fold::{TypeFoldable, TypeVisitor}; use rustc::util::nodemap::FxHashSet; use rustc::mir::interpret::ConstValue; use syntax::source_map::Span; #[derive(Clone, PartialEq, Eq, Hash, Debug)] pub struct Parameter(pub u32); impl From<ty::ParamTy> for Parameter { fn from(param: ty::ParamTy) -> Self { Parameter(param.idx) } } impl From<ty::EarlyBoundRegion> for Parameter { fn from(param: ty::EarlyBoundRegion) -> Self { Parameter(param.index) } } impl From<ty::ParamConst> for Parameter { fn from(param: ty::ParamConst) -> Self { Parameter(param.index) } } /// Returns the set of parameters constrained by the impl header. pub fn parameters_for_impl<'tcx>(impl_self_ty: Ty<'tcx>, impl_trait_ref: Option<ty::TraitRef<'tcx>>) -> FxHashSet<Parameter> { let vec = match impl_trait_ref { Some(tr) => parameters_for(&tr, false), None => parameters_for(&impl_self_ty, false), }; vec.into_iter().collect() } /// If `include_nonconstraining` is false, returns the list of parameters that are /// constrained by `t` - i.e., the value of each parameter in the list is /// uniquely determined by `t` (see RFC 447). If it is true, return the list /// of parameters whose values are needed in order to constrain `t` - these /// differ, with the latter being a superset, in the presence of projections. pub fn parameters_for<'tcx, T>(t: &T, include_nonconstraining: bool) -> Vec<Parameter> where T: TypeFoldable<'tcx> { let mut collector = ParameterCollector { parameters: vec![], include_nonconstraining, }; t.visit_with(&mut collector); collector.parameters } struct ParameterCollector { parameters: Vec<Parameter>, include_nonconstraining: bool } impl<'tcx> TypeVisitor<'tcx> for ParameterCollector { fn visit_ty(&mut self, t: Ty<'tcx>) -> bool { match t.sty { ty::Projection(..) | ty::Opaque(..) if !self.include_nonconstraining => { // projections are not injective return false; } ty::Param(data) => { self.parameters.push(Parameter::from(data)); } _ => {} } t.super_visit_with(self) } fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool { if let ty::ReEarlyBound(data) = *r { self.parameters.push(Parameter::from(data)); } false } fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool { if let ConstValue::Param(data) = c.val { self.parameters.push(Parameter::from(data)); } false } } pub fn identify_constrained_generic_params<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>, predicates: &ty::GenericPredicates<'tcx>, impl_trait_ref: Option<ty::TraitRef<'tcx>>, input_parameters: &mut FxHashSet<Parameter>) { let mut predicates = predicates.predicates.clone(); setup_constraining_predicates(tcx, &mut predicates, impl_trait_ref, input_parameters); } /// Order the predicates in `predicates` such that each parameter is /// constrained before it is used, if that is possible, and add the /// parameters so constrained to `input_parameters`. For example, /// imagine the following impl: /// /// impl<T: Debug, U: Iterator<Item = T>> Trait for U /// /// The impl's predicates are collected from left to right. Ignoring /// the implicit `Sized` bounds, these are /// * T: Debug /// * U: Iterator /// * <U as Iterator>::Item = T -- a desugared ProjectionPredicate /// /// When we, for example, try to go over the trait-reference /// `IntoIter<u32> as Trait`, we substitute the impl parameters with fresh /// variables and match them with the impl trait-ref, so we know that /// `$U = IntoIter<u32>`.
/// /// However, in order to process the `$T: Debug` predicate, we must first /// know the value of `$T` - which is only given by processing the /// projection. As we occasionally want to process predicates in a single /// pass, we want the projection to come first. In fact, as projections /// can (acyclically) depend on one another - see RFC447 for details - we /// need to topologically sort them. /// /// We *do* have to be somewhat careful when projection targets contain /// projections themselves, for example in /// impl<S,U,V,W> Trait for U where /// /* 0 */ S: Iterator<Item = U>, /// /* - */ U: Iterator, /// /* 1 */ <U as Iterator>::Item: ToOwned<Owned=(W,<V as Iterator>::Item)> /// /* 2 */ W: Iterator<Item = V> /// /* 3 */ V: Debug /// we have to evaluate the projections in the order I wrote them: /// `V: Debug` requires `V` to be evaluated. The only projection that /// *determines* `V` is 2 (1 contains it, but *does not determine it*, /// as it is only contained within a projection), but that requires `W` /// which is determined by 1, which requires `U`, that is determined /// by 0. I should probably pick a less tangled example, but I can't /// think of any. pub fn setup_constraining_predicates<'tcx>(tcx: TyCtxt<'_, '_, '_>, predicates: &mut [(ty::Predicate<'tcx>, Span)], impl_trait_ref: Option<ty::TraitRef<'tcx>>, input_parameters: &mut FxHashSet<Parameter>) { // The canonical way of doing the needed topological sort // would be a DFS, but getting the graph and its ownership // right is annoying, so I am using an in-place fixed-point iteration, // which is `O(nt)` where `t` is the depth of type-parameter constraints, // remembering that `t` should be less than 7 in practice. // // Basically, I iterate over all projections and swap every // "ready" projection to the start of the list, such that // all of the projections before `i` are topologically sorted // and constrain all the parameters in `input_parameters`. // // In the example, `input_parameters` starts by containing `U` - which // is constrained by the trait-ref - and so on the first pass we // observe that `<U as Iterator>::Item = T` is a "ready" projection that // constrains `T` and swap it to front. As it is the sole projection, // no more swaps can take place afterwards, with the result being // * <U as Iterator>::Item = T // * T: Debug // * U: Iterator debug!("setup_constraining_predicates: predicates={:?} \ impl_trait_ref={:?} input_parameters={:?}", predicates, impl_trait_ref, input_parameters); let mut i = 0; let mut changed = true; while changed { changed = false; for j in i..predicates.len() { if let ty::Predicate::Projection(ref poly_projection) = predicates[j].0 { // Note that we can skip binder here because the impl // trait ref never contains any late-bound regions. let projection = poly_projection.skip_binder(); // Special case: watch out for some kind of sneaky attempt // to project out an associated type defined by this very // trait. let unbound_trait_ref = projection.projection_ty.trait_ref(tcx); if Some(unbound_trait_ref.clone()) == impl_trait_ref { continue; } // A projection depends on its input types and determines its output // type. For example, if we have // `<<T as Bar>::Baz as Iterator>::Output = <U as Iterator>::Output` // Then the projection only applies if `T` is known, but it still // does not determine `U`. 
let inputs = parameters_for(&projection.projection_ty.trait_ref(tcx), true); let relies_only_on_inputs = inputs.iter().all(|p| input_parameters.contains(&p)); if !relies_only_on_inputs { continue; } input_parameters.extend(parameters_for(&projection.ty, false)); } else { continue; } // fancy control flow to bypass borrow checker predicates.swap(i, j); i += 1; changed = true; } debug!("setup_constraining_predicates: predicates={:?} \ i={} impl_trait_ref={:?} input_parameters={:?}", predicates, i, impl_trait_ref, input_parameters); } }
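// --- Editor's note: illustrative sketch, not part of the archived file. ---
// A dependency-free rendition of the in-place fixed-point ordering used by
// `setup_constraining_predicates` above: repeatedly sweep the unsorted tail,
// swap any item whose inputs are already known to the front of that tail, and
// extend the known set with the item's outputs. The types here are toy
// stand-ins for rustc's `Predicate`/`Parameter`.
#[cfg(test)]
mod constraining_order_sketch {
    use std::collections::HashSet;

    // (inputs the item relies on, outputs it determines)
    type Item = (Vec<u32>, Vec<u32>);

    fn order(items: &mut [Item], known: &mut HashSet<u32>) {
        let mut i = 0;
        let mut changed = true;
        while changed {
            changed = false;
            for j in i..items.len() {
                // An item is "ready" once every parameter it relies on is known.
                if items[j].0.iter().all(|p| known.contains(p)) {
                    known.extend(items[j].1.iter().copied());
                    items.swap(i, j);
                    i += 1;
                    changed = true;
                }
            }
        }
    }

    #[test]
    fn ready_items_move_to_front() {
        // `T: Debug` needs T; the projection `<U as Iterator>::Item = T`
        // needs U (known from the impl trait-ref) and determines T.
        let mut items = vec![
            (vec![1], vec![]),  // T: Debug   (T = 1)
            (vec![0], vec![1]), // projection (U = 0, determines T)
        ];
        let mut known: HashSet<u32> = [0].into_iter().collect();
        order(&mut items, &mut known);
        assert_eq!(items[0], (vec![0], vec![1])); // projection now comes first
        assert!(known.contains(&1));
    }
}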
41.741627
97
0.60282
db771f936e3b3d427565897200bb1dff5cca616e
2,522
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::IOM0IRQ { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct IOM0IRQR { bits: u8, } impl IOM0IRQR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Proxy"] pub struct _IOM0IRQW<'a> { w: &'a mut W, } impl<'a> _IOM0IRQW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 63; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 0:5 - IOMSTR0 IRQ pad select."] #[inline] pub fn iom0irq(&self) -> IOM0IRQR { let bits = { const MASK: u8 = 63; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u8 }; IOM0IRQR { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 63 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:5 - IOMSTR0 IRQ pad select."] #[inline] pub fn iom0irq(&mut self) -> _IOM0IRQW { _IOM0IRQW { w: self } } }
23.792453
59
0.496828
9b9c174b525650eba1cc3b4d4e8f005e7c7834d3
1,482
use common::set_globals; use rustup::Cfg; use errors::*; use rustup_utils::utils; use rustup::command::run_command_for_dir; use std::env; use std::ffi::OsString; use std::path::PathBuf; use job; pub fn main() -> Result<()> { ::self_update::cleanup_self_updater()?; let _setup = job::setup(); let mut args = env::args(); let arg0 = args.next().map(PathBuf::from); let arg0 = arg0.as_ref() .and_then(|a| a.file_name()) .and_then(|a| a.to_str()); let ref arg0 = arg0.ok_or(ErrorKind::NoExeName)?; // Check for a toolchain specifier. let arg1 = args.next(); let toolchain = arg1.as_ref().and_then(|arg1| { if arg1.starts_with('+') { Some(&arg1[1..]) } else { None } }); // Build command args now while we know whether or not to skip arg 1. let cmd_args: Vec<_> = if toolchain.is_none() { env::args_os().skip(1).collect() } else { env::args_os().skip(2).collect() }; let cfg = set_globals(false)?; cfg.check_metadata_version()?; direct_proxy(&cfg, arg0, toolchain, &cmd_args)?; Ok(()) } fn direct_proxy(cfg: &Cfg, arg0: &str, toolchain: Option<&str>, args: &[OsString]) -> Result<()> { let cmd = match toolchain { None => cfg.create_command_for_dir(&utils::current_dir()?, arg0)?, Some(tc) => cfg.create_command_for_toolchain(tc, false, arg0)?, }; Ok(run_command_for_dir(cmd, arg0, args, &cfg)?) }
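// --- Editor's note: illustrative sketch, not part of the archived file. ---
// The proxy above peeks at the first real argument for a `+toolchain`
// override and, when one is present, forwards only the remaining arguments
// to the selected tool. A dependency-free sketch of just that split:
#[cfg(test)]
mod toolchain_arg_sketch {
    /// Returns (optional toolchain override, args to forward).
    fn split_args(args: &[String]) -> (Option<&str>, &[String]) {
        match args.first() {
            Some(a) if a.starts_with('+') => (Some(&a[1..]), &args[1..]),
            _ => (None, args),
        }
    }

    #[test]
    fn plus_prefix_selects_toolchain() {
        let args: Vec<String> =
            vec!["+nightly".into(), "build".into(), "--release".into()];
        let (tc, rest) = split_args(&args);
        assert_eq!(tc, Some("nightly"));
        assert_eq!(rest, &args[1..]);
    }

    #[test]
    fn no_prefix_passes_everything_through() {
        let args: Vec<String> = vec!["build".into()];
        let (tc, rest) = split_args(&args);
        assert_eq!(tc, None);
        assert_eq!(rest, &args[..]);
    }
}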
26.945455
98
0.597841
b9854c414f64aadbf364b53b411e6d9d7a7e5830
35,947
// Licensed under the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. You may obtain // a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the // License for the specific language governing permissions and limitations // under the License. #![allow(clippy::too_many_arguments)] // This module was originally forked from petgraph's isomorphism module @ v0.5.0 // to handle PyDiGraph inputs instead of petgraph's generic Graph. However it has // since diverged significantly from the original petgraph implementation. use std::cmp::{Ordering, Reverse}; use std::iter::Iterator; use std::marker; use crate::dictmap::*; use hashbrown::HashMap; use pyo3::class::iter::{IterNextOutput, PyIterProtocol}; use pyo3::gc::{PyGCProtocol, PyVisit}; use pyo3::prelude::*; use pyo3::PyTraverseError; use petgraph::stable_graph::NodeIndex; use petgraph::stable_graph::StableGraph; use petgraph::visit::{EdgeRef, IntoEdgeReferences, NodeIndexable}; use petgraph::EdgeType; use petgraph::{Directed, Incoming, Outgoing, Undirected}; use rayon::slice::ParallelSliceMut; use crate::iterators::NodeMap; type StablePyGraph<Ty> = StableGraph<PyObject, PyObject, Ty>; /// Returns `true` if we can map every element of `xs` to a unique /// element of `ys` while using `matcher` func to compare two elements. fn is_subset<T: Copy, F>(xs: &[T], ys: &[T], matcher: F) -> PyResult<bool> where F: Fn(T, T) -> PyResult<bool>, { let mut valid = vec![true; ys.len()]; for &a in xs { let mut found = false; for (&b, free) in ys.iter().zip(valid.iter_mut()) { if *free && matcher(a, b)? { found = true; *free = false; break; } } if !found { return Ok(false); } } Ok(true) } #[inline] fn sorted<N: std::cmp::PartialOrd>(x: &mut (N, N)) { let (a, b) = x; if b < a { std::mem::swap(a, b) } } /// Returns the adjacency matrix of a graph as a dictionary /// with `(i, j)` entry equal to number of edges from node `i` to node `j`. fn adjacency_matrix<Ty: EdgeType>( graph: &StablePyGraph<Ty>, ) -> HashMap<(NodeIndex, NodeIndex), usize> { let mut matrix = HashMap::with_capacity(graph.edge_count()); for edge in graph.edge_references() { let mut item = (edge.source(), edge.target()); if !graph.is_directed() { sorted(&mut item); } let entry = matrix.entry(item).or_insert(0); *entry += 1; } matrix } /// Returns the number of edges from node `a` to node `b`. fn edge_multiplicity<Ty: EdgeType>( graph: &StablePyGraph<Ty>, matrix: &HashMap<(NodeIndex, NodeIndex), usize>, a: NodeIndex, b: NodeIndex, ) -> usize { let mut item = (a, b); if !graph.is_directed() { sorted(&mut item); } *matrix.get(&item).unwrap_or(&0) } /// Nodes `a`, `b` are adjacent if the number of edges /// from node `a` to node `b` is greater than `val`. 
fn is_adjacent<Ty: EdgeType>( graph: &StablePyGraph<Ty>, matrix: &HashMap<(NodeIndex, NodeIndex), usize>, a: NodeIndex, b: NodeIndex, val: usize, ) -> bool { edge_multiplicity(graph, matrix, a, b) >= val } trait NodeSorter<Ty> where Ty: EdgeType, { fn sort(&self, _: &StablePyGraph<Ty>) -> Vec<NodeIndex>; fn reorder( &self, py: Python, graph: &StablePyGraph<Ty>, ) -> (StablePyGraph<Ty>, HashMap<usize, usize>) { let order = self.sort(graph); let mut new_graph = StablePyGraph::<Ty>::with_capacity( graph.node_count(), graph.edge_count(), ); let mut id_map: HashMap<NodeIndex, NodeIndex> = HashMap::with_capacity(graph.node_count()); for node_index in order { let node_data = graph.node_weight(node_index).unwrap(); let new_index = new_graph.add_node(node_data.clone_ref(py)); id_map.insert(node_index, new_index); } for edge in graph.edge_references() { let edge_w = edge.weight(); let p_index = id_map[&edge.source()]; let c_index = id_map[&edge.target()]; new_graph.add_edge(p_index, c_index, edge_w.clone_ref(py)); } ( new_graph, id_map.iter().map(|(k, v)| (v.index(), k.index())).collect(), ) } } /// Sort nodes based on node ids. struct DefaultIdSorter; impl<Ty> NodeSorter<Ty> for DefaultIdSorter where Ty: EdgeType, { fn sort(&self, graph: &StablePyGraph<Ty>) -> Vec<NodeIndex> { graph.node_indices().collect() } } /// Sort nodes based on VF2++ heuristic. struct Vf2ppSorter; impl<Ty> NodeSorter<Ty> for Vf2ppSorter where Ty: EdgeType, { fn sort(&self, graph: &StablePyGraph<Ty>) -> Vec<NodeIndex> { let n = graph.node_bound(); let dout: Vec<usize> = (0..n) .map(|idx| { graph .neighbors_directed(graph.from_index(idx), Outgoing) .count() }) .collect(); let mut din: Vec<usize> = vec![0; n]; if graph.is_directed() { din = (0..n) .map(|idx| { graph .neighbors_directed(graph.from_index(idx), Incoming) .count() }) .collect(); } let mut conn_in: Vec<usize> = vec![0; n]; let mut conn_out: Vec<usize> = vec![0; n]; let mut order: Vec<NodeIndex> = Vec::with_capacity(n); // Process BFS level let mut process = |mut vd: Vec<usize>| -> Vec<usize> { // repeatedly bring largest element in front. for i in 0..vd.len() { let (index, &item) = vd[i..] .iter() .enumerate() .max_by_key(|&(_, &node)| { ( conn_in[node], dout[node], conn_out[node], din[node], Reverse(node), ) }) .unwrap(); vd.swap(i, i + index); order.push(NodeIndex::new(item)); for neigh in graph.neighbors_directed(graph.from_index(item), Outgoing) { conn_in[graph.to_index(neigh)] += 1; } if graph.is_directed() { for neigh in graph .neighbors_directed(graph.from_index(item), Incoming) { conn_out[graph.to_index(neigh)] += 1; } } } vd }; let mut seen: Vec<bool> = vec![false; n]; // Create BFS Tree from root and process each level. 
let mut bfs_tree = |root: usize| { if seen[root] { return; } let mut next_level: Vec<usize> = Vec::new(); seen[root] = true; next_level.push(root); while !next_level.is_empty() { let this_level = next_level; let this_level = process(this_level); next_level = Vec::new(); for bfs_node in this_level { for neighbor in graph.neighbors_directed( graph.from_index(bfs_node), Outgoing, ) { let neigh = graph.to_index(neighbor); if !seen[neigh] { seen[neigh] = true; next_level.push(neigh); } } } } }; let mut sorted_nodes: Vec<usize> = graph.node_indices().map(|node| node.index()).collect(); sorted_nodes .par_sort_by_key(|&node| (dout[node], din[node], Reverse(node))); sorted_nodes.reverse(); for node in sorted_nodes { bfs_tree(node); } order } } #[derive(Debug)] struct Vf2State<Ty> where Ty: EdgeType, { graph: StablePyGraph<Ty>, /// The current mapping M(s) of nodes from G0 → G1 and G1 → G0, /// NodeIndex::end() for no mapping. mapping: Vec<NodeIndex>, /// out[i] is non-zero if i is in either M_0(s) or Tout_0(s) /// These are all the next vertices that are not mapped yet, but /// have an outgoing edge from the mapping. out: Vec<usize>, /// ins[i] is non-zero if i is in either M_0(s) or Tin_0(s) /// These are all the incoming vertices, those not mapped yet, but /// have an edge from them into the mapping. /// Unused if graph is undirected -- it's identical with out in that case. ins: Vec<usize>, out_size: usize, ins_size: usize, adjacency_matrix: HashMap<(NodeIndex, NodeIndex), usize>, generation: usize, _etype: marker::PhantomData<Directed>, } impl<Ty> Vf2State<Ty> where Ty: EdgeType, { pub fn new(graph: StablePyGraph<Ty>) -> Self { let c0 = graph.node_count(); let is_directed = graph.is_directed(); let adjacency_matrix = adjacency_matrix(&graph); Vf2State { graph, mapping: vec![NodeIndex::end(); c0], out: vec![0; c0], ins: vec![0; c0 * (is_directed as usize)], out_size: 0, ins_size: 0, adjacency_matrix, generation: 0, _etype: marker::PhantomData, } } /// Return **true** if we have a complete mapping pub fn is_complete(&self) -> bool { self.generation == self.mapping.len() } /// Add mapping **from** <-> **to** to the state. pub fn push_mapping(&mut self, from: NodeIndex, to: NodeIndex) { self.generation += 1; let s = self.generation; self.mapping[from.index()] = to; // update T0 & T1 ins/outs // T0out: Node in G0 not in M0 but successor of a node in M0. // st.out[0]: Node either in M0 or successor of M0 for ix in self.graph.neighbors(from) { if self.out[ix.index()] == 0 { self.out[ix.index()] = s; self.out_size += 1; } } if self.graph.is_directed() { for ix in self.graph.neighbors_directed(from, Incoming) { if self.ins[ix.index()] == 0 { self.ins[ix.index()] = s; self.ins_size += 1; } } } } /// Restore the state to before the last added mapping pub fn pop_mapping(&mut self, from: NodeIndex) { let s = self.generation; self.generation -= 1; // undo (n, m) mapping self.mapping[from.index()] = NodeIndex::end(); // unmark in ins and outs for ix in self.graph.neighbors(from) { if self.out[ix.index()] == s { self.out[ix.index()] = 0; self.out_size -= 1; } } if self.graph.is_directed() { for ix in self.graph.neighbors_directed(from, Incoming) { if self.ins[ix.index()] == s { self.ins[ix.index()] = 0; self.ins_size -= 1; } } } } /// Find the next (least) node in the Tout set. pub fn next_out_index(&self, from_index: usize) -> Option<usize> { self.out[from_index..] 
.iter() .enumerate() .find(move |&(index, elt)| { *elt > 0 && self.mapping[from_index + index] == NodeIndex::end() }) .map(|(index, _)| index) } /// Find the next (least) node in the Tin set. pub fn next_in_index(&self, from_index: usize) -> Option<usize> { self.ins[from_index..] .iter() .enumerate() .find(move |&(index, elt)| { *elt > 0 && self.mapping[from_index + index] == NodeIndex::end() }) .map(|(index, _)| index) } /// Find the next (least) node in the N - M set. pub fn next_rest_index(&self, from_index: usize) -> Option<usize> { self.mapping[from_index..] .iter() .enumerate() .find(|&(_, elt)| *elt == NodeIndex::end()) .map(|(index, _)| index) } } trait SemanticMatcher<T> { fn enabled(&self) -> bool; fn eq(&self, _: Python, _: &T, _: &T) -> PyResult<bool>; } impl SemanticMatcher<PyObject> for Option<PyObject> { #[inline] fn enabled(&self) -> bool { self.is_some() } #[inline] fn eq(&self, py: Python, a: &PyObject, b: &PyObject) -> PyResult<bool> { let res = self.as_ref().unwrap().call1(py, (a, b))?; res.is_true(py) } } /// [Graph] Return `true` if the graphs `g0` and `g1` are (sub) graph isomorphic. /// /// Using the VF2 algorithm, examining both syntactic and semantic /// graph isomorphism (graph structure and matching node and edge weights). /// /// The graphs should not be multigraphs. pub fn is_isomorphic<Ty: EdgeType>( py: Python, g0: &StablePyGraph<Ty>, g1: &StablePyGraph<Ty>, node_match: Option<PyObject>, edge_match: Option<PyObject>, id_order: bool, ordering: Ordering, induced: bool, call_limit: Option<usize>, ) -> PyResult<bool> { if (g0.node_count().cmp(&g1.node_count()).then(ordering) != ordering) || (g0.edge_count().cmp(&g1.edge_count()).then(ordering) != ordering) { return Ok(false); } let mut vf2 = Vf2Algorithm::new( py, g0, g1, node_match, edge_match, id_order, ordering, induced, call_limit, ); if vf2.next(py)?.is_some() { return Ok(true); } Ok(false) } #[derive(Copy, Clone, PartialEq, Debug)] enum OpenList { Out, In, Other, } #[derive(Clone, PartialEq, Debug)] enum Frame<N: marker::Copy> { Outer, Inner { nodes: [N; 2], open_list: OpenList }, Unwind { nodes: [N; 2], open_list: OpenList }, } struct Vf2Algorithm<Ty, F, G> where Ty: EdgeType, F: SemanticMatcher<PyObject>, G: SemanticMatcher<PyObject>, { st: [Vf2State<Ty>; 2], node_match: F, edge_match: G, ordering: Ordering, induced: bool, node_map_g0: HashMap<usize, usize>, node_map_g1: HashMap<usize, usize>, stack: Vec<Frame<NodeIndex>>, call_limit: Option<usize>, _counter: usize, } impl<Ty, F, G> Vf2Algorithm<Ty, F, G> where Ty: EdgeType, F: SemanticMatcher<PyObject>, G: SemanticMatcher<PyObject>, { pub fn new( py: Python, g0: &StablePyGraph<Ty>, g1: &StablePyGraph<Ty>, node_match: F, edge_match: G, id_order: bool, ordering: Ordering, induced: bool, call_limit: Option<usize>, ) -> Self { let (g0, node_map_g0) = if id_order { DefaultIdSorter.reorder(py, g0) } else { Vf2ppSorter.reorder(py, g0) }; let (g1, node_map_g1) = if id_order { DefaultIdSorter.reorder(py, g1) } else { Vf2ppSorter.reorder(py, g1) }; let st = [Vf2State::new(g0), Vf2State::new(g1)]; Vf2Algorithm { st, node_match, edge_match, ordering, induced, node_map_g0, node_map_g1, stack: vec![Frame::Outer], call_limit, _counter: 0, } } fn mapping(&self) -> NodeMap { let mut mapping: DictMap<usize, usize> = DictMap::new(); self.st[1] .mapping .iter() .enumerate() .for_each(|(index, val)| { mapping.insert( self.node_map_g0[&val.index()], self.node_map_g1[&index], ); }); NodeMap { node_map: mapping } } fn next_candidate( st: &mut [Vf2State<Ty>; 2], ) -> 
Option<(NodeIndex, NodeIndex, OpenList)> { let mut to_index; let mut from_index = None; let mut open_list = OpenList::Out; // Try the out list to_index = st[1].next_out_index(0); if to_index.is_some() { from_index = st[0].next_out_index(0); open_list = OpenList::Out; } // Try the in list if to_index.is_none() || from_index.is_none() { to_index = st[1].next_in_index(0); if to_index.is_some() { from_index = st[0].next_in_index(0); open_list = OpenList::In; } } // Try the other list -- disconnected graph if to_index.is_none() || from_index.is_none() { to_index = st[1].next_rest_index(0); if to_index.is_some() { from_index = st[0].next_rest_index(0); open_list = OpenList::Other; } } match (from_index, to_index) { (Some(n), Some(m)) => { Some((NodeIndex::new(n), NodeIndex::new(m), open_list)) } // No more candidates _ => None, } } fn next_from_ix( st: &mut [Vf2State<Ty>; 2], nx: NodeIndex, open_list: OpenList, ) -> Option<NodeIndex> { // Find the next node index to try on the `from` side of the mapping let start = nx.index() + 1; let cand0 = match open_list { OpenList::Out => st[0].next_out_index(start), OpenList::In => st[0].next_in_index(start), OpenList::Other => st[0].next_rest_index(start), } .map(|c| c + start); // compensate for start offset. match cand0 { None => None, // no more candidates Some(ix) => { debug_assert!(ix >= start); Some(NodeIndex::new(ix)) } } } fn pop_state(st: &mut [Vf2State<Ty>; 2], nodes: [NodeIndex; 2]) { // Restore state. st[0].pop_mapping(nodes[0]); st[1].pop_mapping(nodes[1]); } fn push_state(st: &mut [Vf2State<Ty>; 2], nodes: [NodeIndex; 2]) { // Add mapping nx <-> mx to the state st[0].push_mapping(nodes[0], nodes[1]); st[1].push_mapping(nodes[1], nodes[0]); } fn is_feasible( py: Python, st: &mut [Vf2State<Ty>; 2], nodes: [NodeIndex; 2], node_match: &mut F, edge_match: &mut G, ordering: Ordering, induced: bool, ) -> PyResult<bool> { // Check syntactic feasibility of mapping by ensuring adjacencies // of nx map to adjacencies of mx. // // nx == map to => mx // // R_succ // // Check that every neighbor of nx is mapped to a neighbor of mx, // then check the reverse, from mx to nx. Check that they have the same // count of edges. 
// // Note: We want to check the lookahead measures here if we can, // R_out: Equal for G0, G1: Card(Succ(G, n) ^ Tout); for both Succ and Pred // R_in: Same with Tin // R_new: Equal for G0, G1: Ñ n Pred(G, n); both Succ and Pred, // Ñ is G0 - M - Tin - Tout let end = NodeIndex::end(); let mut succ_count = [0, 0]; for j in 0..2 { for n_neigh in st[j].graph.neighbors(nodes[j]) { succ_count[j] += 1; if !induced && j == 0 { continue; } // handle the self loop case; it's not in the mapping (yet) let m_neigh = if nodes[j] != n_neigh { st[j].mapping[n_neigh.index()] } else { nodes[1 - j] }; if m_neigh == end { continue; } let val = edge_multiplicity( &st[j].graph, &st[j].adjacency_matrix, nodes[j], n_neigh, ); let has_edge = is_adjacent( &st[1 - j].graph, &st[1 - j].adjacency_matrix, nodes[1 - j], m_neigh, val, ); if !has_edge { return Ok(false); } } } if succ_count[0].cmp(&succ_count[1]).then(ordering) != ordering { return Ok(false); } // R_pred if st[0].graph.is_directed() { let mut pred_count = [0, 0]; for j in 0..2 { for n_neigh in st[j].graph.neighbors_directed(nodes[j], Incoming) { pred_count[j] += 1; if !induced && j == 0 { continue; } // the self loop case is handled in outgoing let m_neigh = st[j].mapping[n_neigh.index()]; if m_neigh == end { continue; } let val = edge_multiplicity( &st[j].graph, &st[j].adjacency_matrix, n_neigh, nodes[j], ); let has_edge = is_adjacent( &st[1 - j].graph, &st[1 - j].adjacency_matrix, m_neigh, nodes[1 - j], val, ); if !has_edge { return Ok(false); } } } if pred_count[0].cmp(&pred_count[1]).then(ordering) != ordering { return Ok(false); } } macro_rules! rule { ($arr:ident, $j:expr, $dir:expr) => {{ let mut count = 0; for n_neigh in st[$j].graph.neighbors_directed(nodes[$j], $dir) { let index = n_neigh.index(); if st[$j].$arr[index] > 0 && st[$j].mapping[index] == end { count += 1; } } count }}; } // R_out if rule!(out, 0, Outgoing) .cmp(&rule!(out, 1, Outgoing)) .then(ordering) != ordering { return Ok(false); } if st[0].graph.is_directed() && rule!(out, 0, Incoming) .cmp(&rule!(out, 1, Incoming)) .then(ordering) != ordering { return Ok(false); } // R_in if st[0].graph.is_directed() { if rule!(ins, 0, Outgoing) .cmp(&rule!(ins, 1, Outgoing)) .then(ordering) != ordering { return Ok(false); } if rule!(ins, 0, Incoming) .cmp(&rule!(ins, 1, Incoming)) .then(ordering) != ordering { return Ok(false); } } // R_new if induced { let mut new_count = [0, 0]; for j in 0..2 { for n_neigh in st[j].graph.neighbors(nodes[j]) { let index = n_neigh.index(); if st[j].out[index] == 0 && (st[j].ins.is_empty() || st[j].ins[index] == 0) { new_count[j] += 1; } } } if new_count[0].cmp(&new_count[1]).then(ordering) != ordering { return Ok(false); } if st[0].graph.is_directed() { let mut new_count = [0, 0]; for j in 0..2 { for n_neigh in st[j].graph.neighbors_directed(nodes[j], Incoming) { let index = n_neigh.index(); if st[j].out[index] == 0 && st[j].ins[index] == 0 { new_count[j] += 1; } } } if new_count[0].cmp(&new_count[1]).then(ordering) != ordering { return Ok(false); } } } // semantic feasibility: compare associated data for nodes if node_match.enabled() && !node_match.eq( py, &st[0].graph[nodes[0]], &st[1].graph[nodes[1]], )? { return Ok(false); } // semantic feasibility: compare associated data for edges if edge_match.enabled() { let matcher = |a: (NodeIndex, &PyObject), b: (NodeIndex, &PyObject)| -> PyResult<bool> { let (nx, n_edge) = a; let (mx, m_edge) = b; if nx == mx && edge_match.eq(py, n_edge, m_edge)? 
{ return Ok(true); } Ok(false) }; // outgoing edges let range = if induced { 0..2 } else { 1..2 }; for j in range { let e_first: Vec<(NodeIndex, &PyObject)> = st[j] .graph .edges(nodes[j]) .filter_map(|edge| { let n_neigh = edge.target(); let m_neigh = if nodes[j] != n_neigh { st[j].mapping[n_neigh.index()] } else { nodes[1 - j] }; if m_neigh == end { return None; } Some((m_neigh, edge.weight())) }) .collect(); let e_second: Vec<(NodeIndex, &PyObject)> = st[1 - j] .graph .edges(nodes[1 - j]) .map(|edge| (edge.target(), edge.weight())) .collect(); if !is_subset(&e_first, &e_second, matcher)? { return Ok(false); }; } // incoming edges if st[0].graph.is_directed() { let range = if induced { 0..2 } else { 1..2 }; for j in range { let e_first: Vec<(NodeIndex, &PyObject)> = st[j] .graph .edges_directed(nodes[j], Incoming) .filter_map(|edge| { let n_neigh = edge.source(); let m_neigh = if nodes[j] != n_neigh { st[j].mapping[n_neigh.index()] } else { nodes[1 - j] }; if m_neigh == end { return None; } Some((m_neigh, edge.weight())) }) .collect(); let e_second: Vec<(NodeIndex, &PyObject)> = st[1 - j] .graph .edges_directed(nodes[1 - j], Incoming) .map(|edge| (edge.source(), edge.weight())) .collect(); if !is_subset(&e_first, &e_second, matcher)? { return Ok(false); }; } } } Ok(true) } /// Return Some(mapping) if isomorphism is decided, else None. fn next(&mut self, py: Python) -> PyResult<Option<NodeMap>> { if (self.st[0] .graph .node_count() .cmp(&self.st[1].graph.node_count()) .then(self.ordering) != self.ordering) || (self.st[0] .graph .edge_count() .cmp(&self.st[1].graph.edge_count()) .then(self.ordering) != self.ordering) { return Ok(None); } // A "depth first" search of a valid mapping from graph 1 to graph 2 // F(s, n, m) -- evaluate state s and add mapping n <-> m // Find least T1out node (in st.out[1] but not in M[1]) while let Some(frame) = self.stack.pop() { match frame { Frame::Unwind { nodes, open_list: ol, } => { Vf2Algorithm::<Ty, F, G>::pop_state(&mut self.st, nodes); match Vf2Algorithm::<Ty, F, G>::next_from_ix( &mut self.st, nodes[0], ol, ) { None => continue, Some(nx) => { let f = Frame::Inner { nodes: [nx, nodes[1]], open_list: ol, }; self.stack.push(f); } } } Frame::Outer => { match Vf2Algorithm::<Ty, F, G>::next_candidate(&mut self.st) { None => { if self.st[1].is_complete() { return Ok(Some(self.mapping())); } continue; } Some((nx, mx, ol)) => { let f = Frame::Inner { nodes: [nx, mx], open_list: ol, }; self.stack.push(f); } } } Frame::Inner { nodes, open_list: ol, } => { if Vf2Algorithm::<Ty, F, G>::is_feasible( py, &mut self.st, nodes, &mut self.node_match, &mut self.edge_match, self.ordering, self.induced, )? { Vf2Algorithm::<Ty, F, G>::push_state( &mut self.st, nodes, ); // Check cardinalities of Tin, Tout sets if self.st[0] .out_size .cmp(&self.st[1].out_size) .then(self.ordering) == self.ordering && self.st[0] .ins_size .cmp(&self.st[1].ins_size) .then(self.ordering) == self.ordering { self._counter += 1; if let Some(limit) = self.call_limit { if self._counter > limit { return Ok(None); } } let f0 = Frame::Unwind { nodes, open_list: ol, }; self.stack.push(f0); self.stack.push(Frame::Outer); continue; } Vf2Algorithm::<Ty, F, G>::pop_state( &mut self.st, nodes, ); } match Vf2Algorithm::<Ty, F, G>::next_from_ix( &mut self.st, nodes[0], ol, ) { None => continue, Some(nx) => { let f = Frame::Inner { nodes: [nx, nodes[1]], open_list: ol, }; self.stack.push(f); } } } } } Ok(None) } } macro_rules! 
vf2_mapping_impl { ($name:ident, $Ty:ty) => { #[pyclass(module = "retworkx", gc)] pub struct $name { vf2: Vf2Algorithm<$Ty, Option<PyObject>, Option<PyObject>>, } impl $name { pub fn new( py: Python, g0: &StablePyGraph<$Ty>, g1: &StablePyGraph<$Ty>, node_match: Option<PyObject>, edge_match: Option<PyObject>, id_order: bool, ordering: Ordering, induced: bool, call_limit: Option<usize>, ) -> Self { let vf2 = Vf2Algorithm::new( py, g0, g1, node_match, edge_match, id_order, ordering, induced, call_limit, ); $name { vf2 } } } #[pyproto] impl PyIterProtocol for $name { fn __iter__(slf: PyRef<Self>) -> Py<$name> { slf.into() } fn __next__( mut slf: PyRefMut<Self>, ) -> PyResult<IterNextOutput<NodeMap, &'static str>> { Python::with_gil(|py| match slf.vf2.next(py)? { Some(mapping) => Ok(IterNextOutput::Yield(mapping)), None => Ok(IterNextOutput::Return("Ended")), }) } } #[pyproto] impl PyGCProtocol for $name { fn __traverse__( &self, visit: PyVisit, ) -> Result<(), PyTraverseError> { for j in 0..2 { for node in self.vf2.st[j].graph.node_weights() { visit.call(node)?; } for edge in self.vf2.st[j].graph.edge_weights() { visit.call(edge)?; } } if let Some(ref obj) = self.vf2.node_match { visit.call(obj)?; } if let Some(ref obj) = self.vf2.edge_match { visit.call(obj)?; } Ok(()) } fn __clear__(&mut self) { self.vf2.st[0].graph = StablePyGraph::<$Ty>::default(); self.vf2.st[1].graph = StablePyGraph::<$Ty>::default(); self.vf2.node_match = None; self.vf2.edge_match = None; } } }; } vf2_mapping_impl!(DiGraphVf2Mapping, Directed); vf2_mapping_impl!(GraphVf2Mapping, Undirected);
32.210573
83
0.441455
769feef49dd0c7eaa65853c210631f7227eb0dc4
11,507
//! Internal target registry use super::{Chip, ChipFamily, ChipInfo, Target, TargetDescriptionSource}; use crate::config::CoreType; use lazy_static::lazy_static; use std::fs::File; use std::path::Path; use std::sync::{Arc, Mutex, TryLockError}; use thiserror::Error; lazy_static! { static ref REGISTRY: Arc<Mutex<Registry>> = Arc::new(Mutex::new(Registry::from_builtin_families())); } /// Error type for all errors which occur when working /// with the internal registry of targets. #[derive(Debug, Error)] pub enum RegistryError { /// The requested chip was not found in the registry. #[error("The requested chip '{0}' was not found in the list of known targets.")] ChipNotFound(String), /// When searching for a chip based on information read from the target, /// no matching chip was found in the registry. #[error("The connected chip could not automatically be determined.")] ChipAutodetectFailed, /// A core type contained in a target description is not supported /// in probe-rs. #[error("The core type '{0}' is not supported in probe-rs.")] UnknownCoreType(String), /// An IO error which occurred when trying to read a target description file. #[error("An IO error was encountered")] Io(#[from] std::io::Error), /// An error occurred while deserializing a YAML target description file. #[error("Deserializing the yaml encountered an error")] Yaml(#[from] serde_yaml::Error), /// Unable to lock the registry. #[error("Unable to lock registry")] LockUnavailable, } impl<R> From<TryLockError<R>> for RegistryError { fn from(_: TryLockError<R>) -> Self { RegistryError::LockUnavailable } } fn add_generic_targets(vec: &mut Vec<ChipFamily>) { vec.extend_from_slice(&[ ChipFamily { name: "Generic Cortex-M0".to_owned(), manufacturer: None, variants: vec![Chip { name: "cortex-m0".to_owned(), part: None, memory_map: vec![], flash_algorithms: vec![], }], flash_algorithms: vec![], core: CoreType::M0, source: TargetDescriptionSource::Generic, }, ChipFamily { name: "Generic Cortex-M4".to_owned(), manufacturer: None, variants: vec![Chip { name: "cortex-m4".to_owned(), part: None, memory_map: vec![], flash_algorithms: vec![], }], flash_algorithms: vec![], core: CoreType::M4, source: TargetDescriptionSource::Generic, }, ChipFamily { name: "Generic Cortex-M3".to_owned(), manufacturer: None, variants: vec![Chip { name: "cortex-m3".to_owned(), part: None, memory_map: vec![], flash_algorithms: vec![], }], flash_algorithms: vec![], core: CoreType::M3, source: TargetDescriptionSource::Generic, }, ChipFamily { name: "Generic Cortex-M33".to_owned(), manufacturer: None, variants: vec![Chip { name: "cortex-m33".to_owned(), part: None, memory_map: vec![], flash_algorithms: vec![], }], flash_algorithms: vec![], core: CoreType::M33, source: TargetDescriptionSource::Generic, }, ChipFamily { name: "Generic Cortex-M7".to_owned(), manufacturer: None, variants: vec![Chip { name: "cortex-m7".to_owned(), part: None, memory_map: vec![], flash_algorithms: vec![], }], flash_algorithms: vec![], core: CoreType::M7, source: TargetDescriptionSource::Generic, }, ChipFamily { name: "Generic Riscv".to_owned(), manufacturer: None, variants: vec![Chip { name: "riscv".to_owned(), part: None, memory_map: vec![], flash_algorithms: vec![], }], flash_algorithms: vec![], core: CoreType::Riscv, source: TargetDescriptionSource::Generic, }, ]); } /// Registry of all available targets. struct Registry { /// All the available chips.
families: Vec<ChipFamily>, } impl Registry { #[cfg(feature = "builtin-targets")] fn from_builtin_families() -> Self { const BUILTIN_TARGETS: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/targets.bincode")); let mut families: Vec<ChipFamily> = bincode::deserialize(BUILTIN_TARGETS) .expect("Failed to deserialize builtin targets. This is a bug."); add_generic_targets(&mut families); Self { families } } #[cfg(not(feature = "builtin-targets"))] fn from_builtin_families() -> Self { let mut families = vec![]; add_generic_targets(&mut families); Self { families } } fn families(&self) -> &Vec<ChipFamily> { &self.families } fn get_target_by_name(&self, name: impl AsRef<str>) -> Result<Target, RegistryError> { let name = name.as_ref(); log::debug!("Searching registry for chip with name {}", name); let (family, chip) = { // Try to get the corresponding chip. let mut selected_family_and_chip = None; for family in &self.families { for variant in family.variants.iter() { if variant .name .to_ascii_lowercase() .starts_with(&name.to_ascii_lowercase()) { if variant.name.to_ascii_lowercase() != name.to_ascii_lowercase() { log::warn!( "Found chip {} which matches given partial name {}. Consider specifying its full name.", variant.name, name, ) } selected_family_and_chip = Some((family, variant)); } } } let (family, chip) = selected_family_and_chip .ok_or_else(|| RegistryError::ChipNotFound(name.to_owned()))?; // Try to get the corresponding flash algorithm. (family, chip) }; self.get_target(family, chip) } fn search_chips(&self, name: &str) -> Vec<String> { log::debug!("Searching registry for chip with name {}", name); let mut targets = Vec::new(); for family in &self.families { for variant in family.variants.iter() { if variant .name .to_ascii_lowercase() .starts_with(&name.to_ascii_lowercase()) { targets.push(variant.name.to_string()) } } } targets } fn get_target_by_chip_info(&self, chip_info: ChipInfo) -> Result<Target, RegistryError> { let (family, chip) = { match chip_info { ChipInfo::Arm(chip_info) => { // Try to get the corresponding chip. let families = self.families.iter().filter(|f| { f.manufacturer .map(|m| m == chip_info.manufacturer) .unwrap_or(false) }); let mut identified_chips = Vec::new(); for family in families { log::debug!("Checking family {}", family.name); let chips = family .variants() .iter() .filter(|v| v.part.map(|p| p == chip_info.part).unwrap_or(false)) .map(|c| (family, c)); identified_chips.extend(chips) } if identified_chips.len() == 1 { identified_chips.pop().unwrap() } else { log::debug!( "Found {} matching chips for information {:?}, unable to determine chip", identified_chips.len(), chip_info ); return Err(RegistryError::ChipAutodetectFailed); } } } }; self.get_target(family, chip) } fn get_target(&self, family: &ChipFamily, chip: &Chip) -> Result<Target, RegistryError> { // find relevant algorithms let chip_algorithms = chip .flash_algorithms .iter() .filter_map(|fa| family.get_algorithm(fa)) .cloned() .collect(); Ok(Target::new( chip, chip_algorithms, family.core, family.source.clone(), )) } fn add_target_from_yaml(&mut self, path_to_yaml: &Path) -> Result<(), RegistryError> { let file = File::open(path_to_yaml)?; let chip: ChipFamily = serde_yaml::from_reader(file)?; let index = self .families .iter() .position(|old_chip| old_chip.name == chip.name); if let Some(index) = index { self.families.remove(index); } self.families.push(chip); Ok(()) } } /// Get a target from the internal registry based on its name.
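/// /// Matching is case-insensitive and a partial (prefix) name is accepted; a warning is logged when the match is not exact.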
pub fn get_target_by_name(name: impl AsRef<str>) -> Result<Target, RegistryError> { REGISTRY.try_lock()?.get_target_by_name(name) } /// Search the internal registry for chips whose name starts with the given name. pub fn search_chips(name: impl AsRef<str>) -> Result<Vec<String>, RegistryError> { Ok(REGISTRY.try_lock()?.search_chips(name.as_ref())) } /// Try to retrieve a target based on [ChipInfo] read from a target. pub(crate) fn get_target_by_chip_info(chip_info: ChipInfo) -> Result<Target, RegistryError> { REGISTRY.try_lock()?.get_target_by_chip_info(chip_info) } /// Parse a target description file and add the contained targets /// to the internal target registry. pub fn add_target_from_yaml(path_to_yaml: &Path) -> Result<(), RegistryError> { REGISTRY.try_lock()?.add_target_from_yaml(path_to_yaml) } /// Get a list of all families which are contained in the internal /// registry. pub fn families() -> Result<Vec<ChipFamily>, RegistryError> { Ok(REGISTRY.try_lock()?.families().clone()) } #[cfg(test)] mod tests { use super::*; #[test] fn try_fetch1() { let registry = Registry::from_builtin_families(); assert!(registry.get_target_by_name("nrf51").is_ok()); } #[test] fn try_fetch2() { let registry = Registry::from_builtin_families(); assert!(registry.get_target_by_name("nrf5182").is_ok()); } #[test] fn try_fetch3() { let registry = Registry::from_builtin_families(); assert!(registry.get_target_by_name("nrF51822_x").is_ok()); } #[test] fn try_fetch4() { let registry = Registry::from_builtin_families(); assert!(registry.get_target_by_name("nrf51822_Xxaa").is_ok()); } }
33.257225
120
0.53411
0a8ef711e9647eb0378d50e02d01592bed904c06
399
#![no_main] use libfuzzer_sys::fuzz_target; fuzz_target!(|data: [[u8; 27]; 3]| { let data = unsafe { std::mem::transmute::<[[u8; 27]; 3], [u8; 81]>(data) }; let sudoku = msolve::Sudoku::from(data); for solution in sudoku.iter().take(2) { assert!(solution.to_array().iter().all(|x| *x <= 9 && *x != 0)); assert!(solution.to_bytes().iter().all(|x| *x != b'.')); } });
33.25
79
0.546366
71d7ef03323dd751b63fb5ee3720de3bfdfde694
3,969
//! WASI host types specific to Windows host. use crate::host::FileType; use crate::{wasi, Error, Result}; use std::convert::TryInto; use std::ffi::OsStr; use std::fs::{self, File}; use std::io; use std::os::windows::ffi::OsStrExt; use std::time::{SystemTime, UNIX_EPOCH}; pub(crate) fn errno_from_win(error: winx::winerror::WinError) -> wasi::__wasi_errno_t { // TODO: implement error mapping between Windows and WASI use winx::winerror::WinError::*; match error { ERROR_SUCCESS => wasi::__WASI_ERRNO_SUCCESS, ERROR_BAD_ENVIRONMENT => wasi::__WASI_ERRNO_2BIG, ERROR_FILE_NOT_FOUND => wasi::__WASI_ERRNO_NOENT, ERROR_PATH_NOT_FOUND => wasi::__WASI_ERRNO_NOENT, ERROR_TOO_MANY_OPEN_FILES => wasi::__WASI_ERRNO_NFILE, ERROR_ACCESS_DENIED => wasi::__WASI_ERRNO_ACCES, ERROR_SHARING_VIOLATION => wasi::__WASI_ERRNO_ACCES, ERROR_PRIVILEGE_NOT_HELD => wasi::__WASI_ERRNO_NOTCAPABLE, // TODO is this the correct mapping? ERROR_INVALID_HANDLE => wasi::__WASI_ERRNO_BADF, ERROR_INVALID_NAME => wasi::__WASI_ERRNO_NOENT, ERROR_NOT_ENOUGH_MEMORY => wasi::__WASI_ERRNO_NOMEM, ERROR_OUTOFMEMORY => wasi::__WASI_ERRNO_NOMEM, ERROR_DIR_NOT_EMPTY => wasi::__WASI_ERRNO_NOTEMPTY, ERROR_NOT_READY => wasi::__WASI_ERRNO_BUSY, ERROR_BUSY => wasi::__WASI_ERRNO_BUSY, ERROR_NOT_SUPPORTED => wasi::__WASI_ERRNO_NOTSUP, ERROR_FILE_EXISTS => wasi::__WASI_ERRNO_EXIST, ERROR_BROKEN_PIPE => wasi::__WASI_ERRNO_PIPE, ERROR_BUFFER_OVERFLOW => wasi::__WASI_ERRNO_NAMETOOLONG, ERROR_NOT_A_REPARSE_POINT => wasi::__WASI_ERRNO_INVAL, ERROR_NEGATIVE_SEEK => wasi::__WASI_ERRNO_INVAL, ERROR_DIRECTORY => wasi::__WASI_ERRNO_NOTDIR, ERROR_ALREADY_EXISTS => wasi::__WASI_ERRNO_EXIST, _ => wasi::__WASI_ERRNO_NOTSUP, } } pub(crate) fn filetype_from_std(ftype: &fs::FileType) -> FileType { if ftype.is_file() { FileType::RegularFile } else if ftype.is_dir() { FileType::Directory } else if ftype.is_symlink() { FileType::Symlink } else { FileType::Unknown } } fn num_hardlinks(file: &File) -> io::Result<u64> { Ok(winx::file::get_fileinfo(file)?.nNumberOfLinks.into()) } fn device_id(file: &File) -> io::Result<u64> { Ok(winx::file::get_fileinfo(file)?.dwVolumeSerialNumber.into()) } pub(crate) fn file_serial_no(file: &File) -> io::Result<u64> { let info = winx::file::get_fileinfo(file)?; let high = info.nFileIndexHigh; let low = info.nFileIndexLow; let no = (u64::from(high) << 32) | u64::from(low); Ok(no) } fn change_time(file: &File) -> io::Result<i64> { winx::file::change_time(file) } fn systemtime_to_timestamp(st: SystemTime) -> Result<u64> { st.duration_since(UNIX_EPOCH) .map_err(|_| Error::EINVAL)? // date earlier than UNIX_EPOCH .as_nanos() .try_into() .map_err(Into::into) // u128 doesn't fit into u64 } pub(crate) fn filestat_from_win(file: &File) -> Result<wasi::__wasi_filestat_t> { let metadata = file.metadata()?; Ok(wasi::__wasi_filestat_t { dev: device_id(file)?, ino: file_serial_no(file)?, nlink: num_hardlinks(file)?.try_into()?, // u64 doesn't fit into u32 size: metadata.len(), atim: systemtime_to_timestamp(metadata.accessed()?)?, ctim: change_time(file)?.try_into()?, // i64 doesn't fit into u64 mtim: systemtime_to_timestamp(metadata.modified()?)?, filetype: filetype_from_std(&metadata.file_type()).to_wasi(), }) } /// Creates owned WASI path from OS string. /// /// NB WASI spec requires OS string to be valid UTF-8. Otherwise, /// `__WASI_ERRNO_ILSEQ` error is returned. pub(crate) fn path_from_host<S: AsRef<OsStr>>(s: S) -> Result<String> { let vec: Vec<u16> = s.as_ref().encode_wide().collect(); String::from_utf16(&vec).map_err(|_| Error::EILSEQ) }
38.163462
103
0.675233
0ebfc40e5e48ebb2d1f39268e5a778117edbc600
7,909
use crate::acpi; use crate::ap; use crate::apic; use crate::boot_info::BootInfo; use crate::device; use crate::interrupt; use crate::linux; use crate::logger; use crate::memory; use crate::multiboot2; use crate::percore; use crate::time; use crate::vcpu; use crate::vm; use alloc::collections::BTreeMap; use alloc::sync::Arc; use alloc::vec::Vec; use log::{debug, info}; use spin::RwLock; extern "C" { static AP_STARTUP_ADDR: u16; static mut AP_STACK_ADDR: u64; static mut AP_IDX: u64; static mut AP_READY: u8; } // Temporary helper function to create a vm for a single core fn default_vm( core: usize, mem: u64, info: &BootInfo, ) -> Arc<RwLock<vm::VirtualMachine>> { let mut config = vm::VirtualMachineConfig::new(vec![core as u8], mem); // FIXME: When `map_bios` may return an error, log the error. config.map_bios("seabios.bin".into()).unwrap_or(()); let device_map = config.device_map(); device_map .register_device(device::acpi::AcpiRuntime::new(0xb000).unwrap()) .unwrap(); device_map .register_device(device::com::ComDevice::new(core as u64, 0x3F8)) .unwrap(); device_map .register_device(device::com::ComDevice::new(core as u64, 0x2F8)) .unwrap(); device_map .register_device(device::com::ComDevice::new(core as u64, 0x3E8)) .unwrap(); device_map .register_device(device::com::ComDevice::new(core as u64, 0x2E8)) .unwrap(); device_map .register_device(device::debug::DebugPort::new(core as u64, 0x402)) .unwrap(); device_map .register_device(device::vga::VgaController::new()) .unwrap(); device_map .register_device(device::dma::Dma8237::new()) .unwrap(); device_map .register_device(device::ignore::IgnoredDevice::new()) .unwrap(); device_map .register_device(device::pci::PciRootComplex::new()) .unwrap(); device_map .register_device(device::pic::Pic8259::new()) .unwrap(); device_map .register_device(device::keyboard::Keyboard8042::new()) .unwrap(); device_map .register_device(device::pit::Pit8254::new()) .unwrap(); device_map .register_device(device::pos::ProgrammableOptionSelect::new()) .unwrap(); device_map .register_device(device::rtc::CmosRtc::new(mem)) .unwrap(); //TODO: this should actually be per-vcpu device_map .register_device(device::lapic::LocalApic::new()) .unwrap(); let mut fw_cfg_builder = device::qemu_fw_cfg::QemuFwCfgBuilder::new(); // The 'linuxboot' file is an option rom that loads the linux kernel // via qemu_fw_cfg fw_cfg_builder .add_file( "genroms/linuxboot_dma.bin", info.find_module("linuxboot_dma.bin").unwrap().data(), ) .unwrap(); // Passing the bootorder file automatically selects the option rom // as the default boot device fw_cfg_builder .add_file( "bootorder", "/rom@genroms/linuxboot_dma.bin\nHALT".as_bytes(), ) .unwrap(); linux::load_linux( "kernel", "initramfs", core::concat!( "rodata=0 nopti disableapic acpi=off ", "earlyprintk=serial,0x3f8,115200 ", "console=ttyS0 debug nokaslr noapic mitigations=off ", "root=/dev/ram0 rdinit=/init\0" ) .as_bytes(), mem, &mut fw_cfg_builder, info, ) .unwrap(); device_map.register_device(fw_cfg_builder.build()).unwrap(); vm::VirtualMachine::new(config, info).expect("Failed to create vm") } #[no_mangle] pub extern "C" fn ap_entry(_ap_data: &ap::ApData) -> ! 
{ unsafe { interrupt::idt::ap_init() }; let local_apic = apic::LocalApic::init().expect("Failed to initialize local APIC"); info!( "X2APIC:\tid={}\tbase=0x{:x}\tversion=0x{:x}", local_apic.id(), local_apic.raw_base(), local_apic.version() ); vcpu::mp_entry_point() } static LOGGER: logger::DirectLogger = logger::DirectLogger::new(); #[no_mangle] pub unsafe extern "C" fn kmain_multiboot2(multiboot_info_addr: usize) -> ! { let boot_info = multiboot2::early_init_multiboot2( memory::HostPhysAddr::new(multiboot_info_addr as u64), ); kmain(boot_info) } unsafe fn kmain(mut boot_info: BootInfo) -> ! { // Setup the actual interrupt handlers interrupt::idt::init(); // Setup our (com0) logger log::set_logger(&LOGGER) .map(|()| log::set_max_level(log::LevelFilter::Info)) .expect("Failed to set logger"); // Calibrate the global time source time::init_global_time().expect("Failed to init global time source"); // If the boot method provided an RSDT, use that one. Otherwise, search the // BIOS areas for it. let rsdt = boot_info .rsdp .get_or_insert_with(|| { acpi::rsdp::RSDP::find().expect("Failed to find the RSDP") }) .rsdt() .expect("Failed to read RSDT"); // Initialize the BSP local APIC let local_apic = apic::LocalApic::init().expect("Failed to initialize local APIC"); let madt_sdt = rsdt.find_entry(b"APIC").expect("No MADT found"); let madt = acpi::madt::MADT::new(&madt_sdt); let apic_ids = madt .structures() .filter_map(|ics| match ics { // TODO(dlrobertson): Check the flags to ensure we can actually // use this APIC. Ok(acpi::madt::Ics::LocalApic { apic_id, .. }) => { Some(apic_id as u32) } _ => None, }) .collect::<Vec<_>>(); percore::init_sections(apic_ids.len()) .expect("Failed to initialize per-core sections"); let mut map = BTreeMap::new(); for apic_id in apic_ids.iter() { map.insert( *apic_id as usize, default_vm(*apic_id as usize, 256, &boot_info), ); } vm::VM_MAP = Some(map); debug!("AP_STARTUP address: 0x{:x}", AP_STARTUP_ADDR); for (idx, apic_id) in apic_ids.into_iter().enumerate() { if apic_id == local_apic.id() as u32 { continue; } // Allocate a stack for the AP let stack = vec![0u8; 100 * 1024]; // Get the bottom of the stack and align let stack_bottom = (stack.as_ptr() as u64 + stack.len() as u64) & 0xFFFFFFFFFFFFFFF0; core::mem::forget(stack); core::ptr::write_volatile(&mut AP_STACK_ADDR as *mut u64, stack_bottom); // Map the APIC ids to a sequential list and pass it to the AP core::ptr::write_volatile(&mut AP_IDX as *mut u64, idx as u64); // mfence to ensure that the APs see the new stack address core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst); debug!("Send INIT to AP id={}", apic_id); local_apic.send_ipi( apic_id, apic::DstShorthand::NoShorthand, apic::TriggerMode::Edge, apic::Level::Assert, apic::DstMode::Physical, apic::DeliveryMode::Init, 0, ); debug!("Send SIPI to AP id={}", apic_id); local_apic.send_ipi( apic_id, apic::DstShorthand::NoShorthand, apic::TriggerMode::Edge, apic::Level::Assert, apic::DstMode::Physical, apic::DeliveryMode::StartUp, (AP_STARTUP_ADDR >> 12) as u8, ); // Wait until the AP reports that it is done with startup while core::ptr::read_volatile(&AP_READY as *const u8) != 1 {} // Once the AP is done, clear the ready flag core::ptr::write_volatile(&mut AP_READY as *mut u8, 0); } vcpu::mp_entry_point() }
29.511194
80
0.597421
3afdca201965f8fc30838b68c38636ba762b3975
823
use crate::{fd, Argdata, ReadError, Value}; use byteorder::{BigEndian, ByteOrder}; use std::io; #[derive(Clone, Copy, PartialEq, PartialOrd, Debug)] pub struct Float { value: f64, } /// Create an argdata value representing a 64-bit floating point value. pub fn float<T: Into<f64>>(value: T) -> Float { Float { value: value.into(), } } impl Float { pub fn value(&self) -> f64 { self.value } } impl<'d> Argdata<'d> for Float { fn read<'a>(&'a self) -> Result<Value<'a, 'd>, ReadError> where 'd: 'a, { Ok(Value::Float(self.value)) } fn serialized_length(&self) -> usize { 9 } fn serialize(&self, writer: &mut dyn io::Write, _: Option<&mut dyn fd::FdMapping>) -> io::Result<()> { let mut buf = [0; 9]; buf[0] = 5; BigEndian::write_f64(&mut buf[1..], self.value); writer.write_all(&buf) } }
19.595238
103
0.625759
4babef54177349f3b66262843d730afb55b890e7
987
use super::call_method; use crate::{bot::InnerBot, errors}; use serde::Serialize; /// Deletes a sticker from a sticker set. /// /// Reflects the [`deleteStickerFromSet`][docs] method. /// /// [docs]: https://core.telegram.org/bots/api#deletestickerfromset #[derive(Serialize, Debug, Clone)] #[must_use = "methods do nothing unless turned into a future"] pub struct DeleteStickerFromSet<'a> { #[serde(skip)] bot: &'a InnerBot, sticker: String, } impl<'a> DeleteStickerFromSet<'a> { pub(crate) fn new(bot: &'a InnerBot, sticker: impl Into<String>) -> Self { Self { bot, sticker: sticker.into(), } } } impl DeleteStickerFromSet<'_> { /// Calls the method. pub async fn call(self) -> Result<(), errors::MethodCall> { call_method::<bool>( self.bot, "deleteStickerFromSet", None, serde_json::to_vec(&self).unwrap(), ) .await?; Ok(()) } }
24.073171
78
0.585613
89ae950efdc65141c190d7393bd0ad158f2d7581
1,142
use std::collections::{HashMap, HashSet}; static INPUT: &'static str = include_str!(r"input.txt"); fn parse(s: &str) -> (&str, &str) { let mut splitter = s.split(")"); (splitter.next().unwrap(), splitter.next().unwrap()) } fn orbitters_of<'a>(orbittee: &'a str, map: &HashMap<&'a str, HashSet<&'a str>>) -> usize { map.get(&orbittee) .map(|orbits| { orbits .iter() .fold(0, |count, orbitter| count + 1 + orbitters_of(orbitter, map)) }) .unwrap_or(0) } fn orbitters_wrap<'a>(orbits: &HashMap<&'a str, HashSet<&'a str>>) -> usize { orbits .iter() .map(|(orbittee, _)| orbitters_of(orbittee, &orbits)) .sum() } fn main() { let mut orbits: HashMap<&str, HashSet<&str>> = HashMap::new(); for line in INPUT.lines() { let (orbittee, orbitter) = parse(line); orbits .entry(orbittee) .or_insert_with(HashSet::default) .insert(orbitter); orbits.entry(orbitter).or_insert_with(HashSet::default); } let res = orbitters_wrap(&orbits); println!("{}", res); }
25.954545
91
0.552539
e4fca269d3ff6dc6dc09d4c1a81e1c27ebefb900
1,398
// Copyright 2018 Dmitry Tantsur <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. extern crate env_logger; extern crate openstack; use std::env; #[cfg(feature = "compute")] fn main() { env_logger::init(); let os = openstack::Cloud::from_env() .expect("Failed to create an identity provider from the environment"); let id = env::args().nth(1).expect("Provide a flavor ID"); let flavor = os.get_flavor(id).expect("Cannot get a flavor"); println!( "ID = {}, Name = {}, VCPUs = {}, RAM = {} MiB, DISK = {} GiB", flavor.id(), flavor.name(), flavor.vcpu_count(), flavor.ram_size(), flavor.root_size() ); println!("Extra Specs = {:?}", flavor.extra_specs()); } #[cfg(not(feature = "compute"))] fn main() { panic!("This example cannot run with 'compute' feature disabled"); }
31.066667
78
0.658083
5d1816eb2ba47757d8d5d39fa5f2d1a378241ce9
5,841
use super::*; use core::any::type_name; use core::convert::identity; use core::borrow::Borrow; use core::borrow::BorrowMut; use core::fmt; use core::marker::PhantomData; use core::mem::{self, ManuallyDrop}; use core::ops; use core::ptr; use crate::marshal::*; use crate::marshal::blob::*; use crate::marshal::decode::*; use crate::marshal::encode::*; use crate::marshal::load::*; use crate::marshal::save::*; /// An owned pointer. /// /// Extends `ValidPtr` with ownership semantics, acting like it owns a `T` value and properly /// deallocating the pointer on drop. #[repr(transparent)] pub struct OwnedPtr<T: ?Sized + Pointee, Z: Zone> { marker: PhantomData<Box<T>>, inner: ManuallyDrop<ValidPtr<T, Z>>, } impl<T: ?Sized + Pointee, Z: Zone> ops::Deref for OwnedPtr<T, Z> { type Target = ValidPtr<T, Z>; fn deref(&self) -> &ValidPtr<T, Z> { &self.inner } } impl<T: ?Sized + Pointee, Z: Zone> ops::DerefMut for OwnedPtr<T, Z> { fn deref_mut(&mut self) -> &mut ValidPtr<T, Z> { &mut self.inner } } impl<T: ?Sized + Pointee, Z: Zone> Borrow<ValidPtr<T, Z>> for OwnedPtr<T, Z> { fn borrow(&self) -> &ValidPtr<T, Z> { self } } impl<T: ?Sized + Pointee, Z: Zone> BorrowMut<ValidPtr<T, Z>> for OwnedPtr<T, Z> { fn borrow_mut(&mut self) -> &mut ValidPtr<T, Z> { self } } impl<T: ?Sized + Pointee, Z: Zone> OwnedPtr<T, Z> { pub fn new(value: impl Take<T>) -> Self where Z: Default { Z::alloc(value) } /// Creates a new `OwnedPtr` from a `ValidPtr`. /// /// # Safety /// /// The `ValidPtr` must point to a uniquely owned value that can be safely dropped via /// `Ptr::dealloc_owned()`. pub unsafe fn new_unchecked(ptr: ValidPtr<T, Z>) -> Self { Self { marker: PhantomData, inner: ManuallyDrop::new(ptr), } } /// Unwraps the inner `ValidPtr`. /// /// The value is *not* deallocated! It is the caller's responsibility to do that; failing to do /// so may leak memory.
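/// /// The returned `ValidPtr` can still be freed manually, for example via `Ptr::dealloc_owned()` as noted on `new_unchecked`.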
pub fn into_inner(self) -> ValidPtr<T, Z> { let mut this = ManuallyDrop::new(self); unsafe { (&mut *this.inner as *mut ValidPtr<T, Z>).read() } } } impl<T: ?Sized + Pointee, Z: Zone> Drop for OwnedPtr<T, Z> { fn drop(&mut self) { unsafe { let this = ptr::read(self); Z::try_take_dirty_unsized(this, |this| { match this { Ok(value) => { // value is a &mut ManuallyDrop<T>, so we need to coerce it first or // drop_in_place won't actually do anything let value: &mut T = value; ptr::drop_in_place(value) } Err(_persist_ptr) => (), } }) } } } impl<T: ?Sized + Pointee, Z: Zone> Clone for OwnedPtr<T, Z> where T: Clone, Z: Clone { fn clone(&self) -> Self { Z::clone_ptr(self) } } impl<T: ?Sized + Pointee, Z: Zone> fmt::Debug for OwnedPtr<T, Z> where T: fmt::Debug { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { Z::fmt_debug_valid_ptr(self, f) } } impl<T: ?Sized + Pointee, Z: Zone> ValidateBlob for OwnedPtr<T, Z> where T::Metadata: ValidateBlob, { type Error = <ValidPtr<T, Z> as ValidateBlob>::Error; fn validate<'a, V: PaddingValidator>( mut blob: BlobCursor<'a, Self, V>, ) -> Result<ValidBlob<'a, Self>, BlobError<Self::Error, V::Error>> { blob.field::<FatPtr<T,Z>,_>(identity)?; unsafe { blob.assume_valid() } } } unsafe impl<T: ?Sized + PersistPointee, Z: Zone> Persist for OwnedPtr<T, Z> { type Persist = OwnedPtr<T::Persist, Z::Persist>; type Error = <OwnedPtr<T::Persist, Z::Persist> as ValidateBlob>::Error; } unsafe impl<'a, Z: Zone, T: ?Sized + Pointee> ValidateChildren<'a, Z> for OwnedPtr<T, Z> where T: ValidatePointeeChildren<'a, Z> { type State = super::validptr::ValidateState<'a, T::Persist, T::State>; fn validate_children(this: &'a OwnedPtr<T::Persist, Z::Persist>) -> Self::State { <ValidPtr<T,Z> as ValidateChildren<'a, Z>>::validate_children(this) } fn poll<V: PtrValidator<Z>>(this: &'a Self::Persist, state: &mut Self::State, validator: &V) -> Result<(), V::Error> { <ValidPtr<T,Z> as ValidateChildren<'a, Z>>::poll(this, state, validator)?; Ok(()) } } impl<Z: Zone, T: ?Sized + Load<Z>> Decode<Z> for OwnedPtr<T,Z> { } impl<T: ?Sized + Pointee, Z: Zone, Y: Zone> Encoded<Y> for OwnedPtr<T,Z> where T: Saved<Y> { type Encoded = OwnedPtr<T::Saved, Y>; } impl<'a, T: 'a + ?Sized + Pointee, Z: 'a + Zone, Y: Zone> Encode<'a, Y> for OwnedPtr<T,Z> where T: Save<'a, Y>, Z: SavePtr<Y>, { type State = super::validptr::EncodeState<'a, T, Z, Y>; fn make_encode_state(&'a self) -> Self::State { self.inner.make_encode_state() } fn encode_poll<D: Dumper<Y>>(&self, state: &mut Self::State, dumper: D) -> Result<D, D::Error> { self.inner.encode_poll(state, dumper) } fn encode_blob<W: WriteBlob>(&self, state: &Self::State, dst: W) -> Result<W::Ok, W::Error> { self.inner.encode_blob(state, dst) } } /* impl<'a, Y: Zone, Z: 'a + Zone + Encode<'a, Y>, T: 'a + ?Sized + Save<'a, Y>> Encode<'a, Y> for OwnedPtr<T, Z> { type State = <ValidPtr<T, Z> as Encode<'a, Y>>::State; fn save_children(&'a self) -> Self::State { Encode::save_children(&*self.inner) } fn poll<D: Dumper<Y>>(&self, state: &mut Self::State, dumper: D) -> Result<D, D::Error> { Encode::poll(&*self.inner, state, dumper) } fn encode_blob<W: WriteBlob>(&self, state: &Self::State, dst: W) -> Result<W::Ok, W::Error> { self.inner.encode_blob(state, dst) } } */
28.773399
122
0.576614
283484c54161b7a16450fc2504aa593115996426
369
#![allow(deprecated)] extern crate cylus; use cylus::Cylus; fn main() { println!("Starting.."); let gpio = Cylus::new(24); for _ in 1..10 { println!("{}", gpio.read()); gpio.high(); std::thread::sleep_ms(1000); println!("{}", gpio.read()); gpio.low(); std::thread::sleep_ms(1000); } }
18.45
36
0.482385
ab01fa88b2752da68efe7088d8503950cef075b4
8,487
use distribution; use source::Source; /// A PERT distribution. #[derive(Clone, Copy, Debug)] pub struct Pert { a: f64, b: f64, c: f64, alpha: f64, beta: f64, ln_beta: f64, } impl Pert { /// Create a PERT distribution with parameters `a`, `b`, and `c`. /// /// It should hold that `a < b < c`. #[inline] pub fn new(a: f64, b: f64, c: f64) -> Self { use special::Beta as SpecialBeta; should!(a < b && b < c); let alpha = (4.0 * b + c - 5.0 * a) / (c - a); let beta = (5.0 * c - a - 4.0 * b) / (c - a); Pert { a: a, b: b, c: c, alpha: alpha, beta: beta, ln_beta: alpha.ln_beta(beta), } } /// Return the first parameter. #[inline(always)] pub fn a(&self) -> f64 { self.a } /// Return the second parameter. #[inline(always)] pub fn b(&self) -> f64 { self.b } /// Return the third parameter. #[inline(always)] pub fn c(&self) -> f64 { self.c } /// Return the first shape parameter of the corresponding Beta distribution. #[inline(always)] pub fn alpha(&self) -> f64 { self.alpha } /// Return the second shape parameter of the corresponding Beta distribution. #[inline(always)] pub fn beta(&self) -> f64 { self.beta } } impl distribution::Continuous for Pert { fn density(&self, x: f64) -> f64 { if x < self.a || x > self.c { 0.0 } else { let scale = self.c - self.a; let x = (x - self.a) / scale; ((self.alpha - 1.0) * x.ln() + (self.beta - 1.0) * (-x).ln_1p() - self.ln_beta).exp() / scale } } } impl distribution::Distribution for Pert { type Value = f64; fn distribution(&self, x: f64) -> f64 { use special::Beta; if x <= self.a { 0.0 } else if x >= self.c { 1.0 } else { ((x - self.a) / (self.c - self.a)).inc_beta(self.alpha, self.beta, self.ln_beta) } } } impl distribution::Entropy for Pert { fn entropy(&self) -> f64 { use special::Gamma; let sum = self.alpha + self.beta; (self.c - self.a).ln() + self.ln_beta - (self.alpha - 1.0) * self.alpha.digamma() - (self.beta - 1.0) * self.beta.digamma() + (sum - 2.0) * sum.digamma() } } impl distribution::Inverse for Pert { #[inline] fn inverse(&self, p: f64) -> f64 { use special::Beta; should!(0.0 <= p && p <= 1.0); self.a + (self.c - self.a) * p.inv_inc_beta(self.alpha, self.beta, self.ln_beta) } } impl distribution::Kurtosis for Pert { fn kurtosis(&self) -> f64 { let sum = self.alpha + self.beta; let delta = self.alpha - self.beta; let product = self.alpha * self.beta; 6.0 * (delta * delta * (sum + 1.0) - product * (sum + 2.0)) / (product * (sum + 2.0) * (sum + 3.0)) } } impl distribution::Mean for Pert { #[inline] fn mean(&self) -> f64 { (self.a + self.b * 4.0 + self.c) / 6.0 } } impl distribution::Median for Pert { fn median(&self) -> f64 { use distribution::Inverse; self.inverse(0.5) } } impl distribution::Modes for Pert { fn modes(&self) -> Vec<f64> { vec![self.b] } } impl distribution::Sample for Pert { #[inline] fn sample<S>(&self, source: &mut S) -> f64 where S: Source, { use distribution::gamma; let x = gamma::sample(self.alpha, source); let y = gamma::sample(self.beta, source); self.a + (self.c - self.a) * x / (x + y) } } impl distribution::Skewness for Pert { fn skewness(&self) -> f64 { let sum = self.alpha + self.beta; 2.0 * (self.beta - self.alpha) * (sum + 1.0).sqrt() / ((sum + 2.0) * (self.alpha * self.beta).sqrt()) } } impl distribution::Variance for Pert { fn variance(&self) -> f64 { use distribution::Mean; (self.mean() - self.a) * (self.c - self.mean()) / 7.0 } } #[cfg(test)] mod tests { use assert; use prelude::*; macro_rules! 
new( ($a:expr, $b:expr, $c:expr) => (Pert::new($a, $b, $c)); ); #[test] fn density() { let d = new!(-1.0, 0.5, 2.0); let beta = Beta::new(3.0, 3.0, -1.0, 2.0); let x = vec![-1.15, -1.0, -0.85, -0.5, 0.0, 0.5, 1.0, 1.5, 1.85, 2.0]; let p = vec![ 0.0, 0.0, 0.022562499999999996, 0.19290123456790118, 0.4938271604938269, 0.6249999999999999, 0.49382716049382713, 0.1929012345679011, 0.022562499999999933, 0.0, ]; assert::close( &x.iter().map(|&x| d.density(x)).collect::<Vec<_>>(), &x.iter().map(|&x| beta.density(x)).collect::<Vec<_>>(), 1e-14, ); assert::close( &x.iter().map(|&x| d.density(x)).collect::<Vec<_>>(), &p, 1e-14, ); } #[test] fn distribution() { let d = new!(-1.0, 0.5, 2.0); let beta = Beta::new(3.0, 3.0, -1.0, 2.0); let x = vec![-1.15, -1.0, -0.85, -0.5, 0.0, 0.5, 1.0, 1.5, 1.85, 2.0]; let p = vec![ 0.0, 0.0, 0.001158125, 0.03549382716049382, 0.20987654320987656, 0.5, 0.7901234567901234, 0.9645061728395061, 0.998841875, 1.0, ]; assert::close( &x.iter().map(|&x| d.distribution(x)).collect::<Vec<_>>(), &x.iter().map(|&x| beta.distribution(x)).collect::<Vec<_>>(), 1e-14, ); assert::close( &x.iter().map(|&x| d.distribution(x)).collect::<Vec<_>>(), &p, 1e-14, ); } #[test] fn entropy() { use std::f64::consts::E; let d = vec![ new!(0.0, 0.5, 1.0), new!(0.0, 0.5, E), new!(0.0, 0.3, 1.0), new!(-1.0, 1.0, 2.0), ]; assert::close( &d.iter().map(|d| d.entropy()).collect::<Vec<_>>(), &d.iter() .map(|d| Beta::new(d.alpha(), d.beta(), d.a(), d.c()).entropy()) .collect::<Vec<_>>(), 1e-15, ); } #[test] fn inverse() { let d = new!(-1.0, 0.5, 2.0); let p = vec![0.0, 0.2, 0.4, 0.6, 0.8, 1.0]; let x = vec![ -1.0, -0.020206186475766774, 0.33876229245942, 0.6612377075405802, 1.0202061864757672, 2.0, ]; assert::close( &p.iter().map(|&p| d.inverse(p)).collect::<Vec<_>>(), &x, 1e-14, ); } #[test] fn kurtosis() { assert::close(new!(0.0, 0.5, 1.0).kurtosis(), -2.0 / 3.0, 1e-14); } #[test] fn mean() { assert::close(new!(0.0, 0.5, 1.0).mean(), 0.5, 1e-14); assert::close( new!(-1.0, 1.5, 2.0).mean(), (1.5 * 4.0 - 1.0 + 2.0) / 6.0, 1e-14, ); assert::close( Beta::new(3.0, 3.0, -1.0, 2.0).mean(), (0.5 * 4.0 - 1.0 + 2.0) / 6.0, 1e-14, ); } #[test] fn median() { assert::close(new!(0.0, 0.5, 1.0).median(), 0.5, 1e-14); assert::close(new!(0.0, 0.3, 1.0).median(), 0.3509994849491181, 1e-14); } #[test] fn modes() { assert::close(new!(-1.0, 0.5, 2.0).modes(), vec![0.5], 1e-14); } #[test] fn sample() { for x in Independent(&new!(7.0, 20.0, 42.0), &mut source::default()).take(100) { assert!(7.0 <= x && x <= 42.0); } } #[test] fn skewness() { assert::close(new!(0.0, 0.5, 1.0).skewness(), 0.0, 1e-14); assert::close(new!(-1.0, 0.2, 2.0).skewness(), 0.17797249266332246, 1e-14); assert::close(new!(-1.0, 0.8, 2.0).skewness(), -0.17797249266332246, 1e-14); } #[test] fn variance() { assert::close(new!(0.0, 0.5, 1.0).variance(), 0.25 / 7.0, 1e-14); assert::close(new!(0.0, 0.3, 1.0).variance(), 0.033174603174603176, 1e-14); assert::close(new!(0.0, 0.9, 1.0).variance(), 0.02555555555555556, 1e-14); } }
25.875
97
0.451985
033dedd591dea4765570e08438a299db3432a087
14,249
use alloc::boxed::Box; use core::ops::{ Index, IndexMut }; use core::mem::transmute; use core::convert::TryInto; use super::*; #[repr(C)] pub struct IDT { entries: [EntryInner; 256] } impl IDT { pub fn empty() -> Box<IDT> { unsafe { Box::new_zeroed().assume_init() } } pub fn new() -> Box<IDT> { use crate::kernel::table::idt::default::*; let mut idt = IDT::empty(); idt.set_entry_new(0, panic_on_0); idt.set_entry_new(1, panic_on_1); idt.set_entry_new(2, panic_on_2); idt.set_entry_new(3, panic_on_3); idt.set_entry_new(4, panic_on_4); idt.set_entry_new(5, panic_on_5); idt.set_entry_new(6, panic_on_6); idt.set_entry_new(7, panic_on_7); idt.set_entry_new(8, panic_on_8); idt.set_entry_new(9, panic_on_9); idt.set_entry_new(10, panic_on_10); idt.set_entry_new(11, panic_on_11); idt.set_entry_new(12, panic_on_12); idt.set_entry_new(13, panic_on_13); idt.set_entry_new(14, panic_on_14); idt.set_entry_new(15, panic_on_15); idt.set_entry_new(16, panic_on_16); idt.set_entry_new(17, panic_on_17); idt.set_entry_new(18, panic_on_18); idt.set_entry_new(19, panic_on_19); idt.set_entry_new(20, panic_on_20); idt.set_entry_new(21, panic_on_21); idt.set_entry_new(22, panic_on_22); idt.set_entry_new(23, panic_on_23); idt.set_entry_new(24, panic_on_24); idt.set_entry_new(25, panic_on_25); idt.set_entry_new(26, panic_on_26); idt.set_entry_new(27, panic_on_27); idt.set_entry_new(28, panic_on_28); idt.set_entry_new(29, panic_on_29); idt.set_entry_new(30, panic_on_30); idt.set_entry_new(31, panic_on_31); idt.set_entry_new(32, panic_on_32); idt.set_entry_new(33, panic_on_33); idt.set_entry_new(34, panic_on_34); idt.set_entry_new(35, panic_on_35); idt.set_entry_new(36, panic_on_36); idt.set_entry_new(37, panic_on_37); idt.set_entry_new(38, panic_on_38); idt.set_entry_new(39, panic_on_39); idt.set_entry_new(40, panic_on_40); idt.set_entry_new(41, panic_on_41); idt.set_entry_new(42, panic_on_42); idt.set_entry_new(43, panic_on_43); idt.set_entry_new(44, panic_on_44); idt.set_entry_new(45, panic_on_45); idt.set_entry_new(46, panic_on_46); idt.set_entry_new(47, panic_on_47); idt.set_entry_new(48, panic_on_48); idt.set_entry_new(49, panic_on_49); idt.set_entry_new(50, panic_on_50); idt.set_entry_new(51, panic_on_51); idt.set_entry_new(52, panic_on_52); idt.set_entry_new(53, panic_on_53); idt.set_entry_new(54, panic_on_54); idt.set_entry_new(55, panic_on_55); idt.set_entry_new(56, panic_on_56); idt.set_entry_new(57, panic_on_57); idt.set_entry_new(58, panic_on_58); idt.set_entry_new(59, panic_on_59); idt.set_entry_new(60, panic_on_60); idt.set_entry_new(61, panic_on_61); idt.set_entry_new(62, panic_on_62); idt.set_entry_new(63, panic_on_63); idt.set_entry_new(64, panic_on_64); idt.set_entry_new(65, panic_on_65); idt.set_entry_new(66, panic_on_66); idt.set_entry_new(67, panic_on_67); idt.set_entry_new(68, panic_on_68); idt.set_entry_new(69, panic_on_69); idt.set_entry_new(70, panic_on_70); idt.set_entry_new(71, panic_on_71); idt.set_entry_new(72, panic_on_72); idt.set_entry_new(73, panic_on_73); idt.set_entry_new(74, panic_on_74); idt.set_entry_new(75, panic_on_75); idt.set_entry_new(76, panic_on_76); idt.set_entry_new(77, panic_on_77); idt.set_entry_new(78, panic_on_78); idt.set_entry_new(79, panic_on_79); idt.set_entry_new(80, panic_on_80); idt.set_entry_new(81, panic_on_81); idt.set_entry_new(82, panic_on_82); idt.set_entry_new(83, panic_on_83); idt.set_entry_new(84, panic_on_84); idt.set_entry_new(85, panic_on_85); idt.set_entry_new(86, panic_on_86); idt.set_entry_new(87, panic_on_87); idt.set_entry_new(88, panic_on_88); 
idt.set_entry_new(89, panic_on_89); idt.set_entry_new(90, panic_on_90); idt.set_entry_new(91, panic_on_91); idt.set_entry_new(92, panic_on_92); idt.set_entry_new(93, panic_on_93); idt.set_entry_new(94, panic_on_94); idt.set_entry_new(95, panic_on_95); idt.set_entry_new(96, panic_on_96); idt.set_entry_new(97, panic_on_97); idt.set_entry_new(98, panic_on_98); idt.set_entry_new(99, panic_on_99); idt.set_entry_new(100, panic_on_100); idt.set_entry_new(101, panic_on_101); idt.set_entry_new(102, panic_on_102); idt.set_entry_new(103, panic_on_103); idt.set_entry_new(104, panic_on_104); idt.set_entry_new(105, panic_on_105); idt.set_entry_new(106, panic_on_106); idt.set_entry_new(107, panic_on_107); idt.set_entry_new(108, panic_on_108); idt.set_entry_new(109, panic_on_109); idt.set_entry_new(110, panic_on_110); idt.set_entry_new(111, panic_on_111); idt.set_entry_new(112, panic_on_112); idt.set_entry_new(113, panic_on_113); idt.set_entry_new(114, panic_on_114); idt.set_entry_new(115, panic_on_115); idt.set_entry_new(116, panic_on_116); idt.set_entry_new(117, panic_on_117); idt.set_entry_new(118, panic_on_118); idt.set_entry_new(119, panic_on_119); idt.set_entry_new(120, panic_on_120); idt.set_entry_new(121, panic_on_121); idt.set_entry_new(122, panic_on_122); idt.set_entry_new(123, panic_on_123); idt.set_entry_new(124, panic_on_124); idt.set_entry_new(125, panic_on_125); idt.set_entry_new(126, panic_on_126); idt.set_entry_new(127, panic_on_127); idt.set_entry_new(128, panic_on_128); idt.set_entry_new(129, panic_on_129); idt.set_entry_new(130, panic_on_130); idt.set_entry_new(131, panic_on_131); idt.set_entry_new(132, panic_on_132); idt.set_entry_new(133, panic_on_133); idt.set_entry_new(134, panic_on_134); idt.set_entry_new(135, panic_on_135); idt.set_entry_new(136, panic_on_136); idt.set_entry_new(137, panic_on_137); idt.set_entry_new(138, panic_on_138); idt.set_entry_new(139, panic_on_139); idt.set_entry_new(140, panic_on_140); idt.set_entry_new(141, panic_on_141); idt.set_entry_new(142, panic_on_142); idt.set_entry_new(143, panic_on_143); idt.set_entry_new(144, panic_on_144); idt.set_entry_new(145, panic_on_145); idt.set_entry_new(146, panic_on_146); idt.set_entry_new(147, panic_on_147); idt.set_entry_new(148, panic_on_148); idt.set_entry_new(149, panic_on_149); idt.set_entry_new(150, panic_on_150); idt.set_entry_new(151, panic_on_151); idt.set_entry_new(152, panic_on_152); idt.set_entry_new(153, panic_on_153); idt.set_entry_new(154, panic_on_154); idt.set_entry_new(155, panic_on_155); idt.set_entry_new(156, panic_on_156); idt.set_entry_new(157, panic_on_157); idt.set_entry_new(158, panic_on_158); idt.set_entry_new(159, panic_on_159); idt.set_entry_new(160, panic_on_160); idt.set_entry_new(161, panic_on_161); idt.set_entry_new(162, panic_on_162); idt.set_entry_new(163, panic_on_163); idt.set_entry_new(164, panic_on_164); idt.set_entry_new(165, panic_on_165); idt.set_entry_new(166, panic_on_166); idt.set_entry_new(167, panic_on_167); idt.set_entry_new(168, panic_on_168); idt.set_entry_new(169, panic_on_169); idt.set_entry_new(170, panic_on_170); idt.set_entry_new(171, panic_on_171); idt.set_entry_new(172, panic_on_172); idt.set_entry_new(173, panic_on_173); idt.set_entry_new(174, panic_on_174); idt.set_entry_new(175, panic_on_175); idt.set_entry_new(176, panic_on_176); idt.set_entry_new(177, panic_on_177); idt.set_entry_new(178, panic_on_178); idt.set_entry_new(179, panic_on_179); idt.set_entry_new(180, panic_on_180); idt.set_entry_new(181, panic_on_181); idt.set_entry_new(182, panic_on_182); 
idt.set_entry_new(183, panic_on_183); idt.set_entry_new(184, panic_on_184); idt.set_entry_new(185, panic_on_185); idt.set_entry_new(186, panic_on_186); idt.set_entry_new(187, panic_on_187); idt.set_entry_new(188, panic_on_188); idt.set_entry_new(189, panic_on_189); idt.set_entry_new(190, panic_on_190); idt.set_entry_new(191, panic_on_191); idt.set_entry_new(192, panic_on_192); idt.set_entry_new(193, panic_on_193); idt.set_entry_new(194, panic_on_194); idt.set_entry_new(195, panic_on_195); idt.set_entry_new(196, panic_on_196); idt.set_entry_new(197, panic_on_197); idt.set_entry_new(198, panic_on_198); idt.set_entry_new(199, panic_on_199); idt.set_entry_new(200, panic_on_200); idt.set_entry_new(201, panic_on_201); idt.set_entry_new(202, panic_on_202); idt.set_entry_new(203, panic_on_203); idt.set_entry_new(204, panic_on_204); idt.set_entry_new(205, panic_on_205); idt.set_entry_new(206, panic_on_206); idt.set_entry_new(207, panic_on_207); idt.set_entry_new(208, panic_on_208); idt.set_entry_new(209, panic_on_209); idt.set_entry_new(210, panic_on_210); idt.set_entry_new(211, panic_on_211); idt.set_entry_new(212, panic_on_212); idt.set_entry_new(213, panic_on_213); idt.set_entry_new(214, panic_on_214); idt.set_entry_new(215, panic_on_215); idt.set_entry_new(216, panic_on_216); idt.set_entry_new(217, panic_on_217); idt.set_entry_new(218, panic_on_218); idt.set_entry_new(219, panic_on_219); idt.set_entry_new(220, panic_on_220); idt.set_entry_new(221, panic_on_221); idt.set_entry_new(222, panic_on_222); idt.set_entry_new(223, panic_on_223); idt.set_entry_new(224, panic_on_224); idt.set_entry_new(225, panic_on_225); idt.set_entry_new(226, panic_on_226); idt.set_entry_new(227, panic_on_227); idt.set_entry_new(228, panic_on_228); idt.set_entry_new(229, panic_on_229); idt.set_entry_new(230, panic_on_230); idt.set_entry_new(231, panic_on_231); idt.set_entry_new(232, panic_on_232); idt.set_entry_new(233, panic_on_233); idt.set_entry_new(234, panic_on_234); idt.set_entry_new(235, panic_on_235); idt.set_entry_new(236, panic_on_236); idt.set_entry_new(237, panic_on_237); idt.set_entry_new(238, panic_on_238); idt.set_entry_new(239, panic_on_239); idt.set_entry_new(240, panic_on_240); idt.set_entry_new(241, panic_on_241); idt.set_entry_new(242, panic_on_242); idt.set_entry_new(243, panic_on_243); idt.set_entry_new(244, panic_on_244); idt.set_entry_new(245, panic_on_245); idt.set_entry_new(246, panic_on_246); idt.set_entry_new(247, panic_on_247); idt.set_entry_new(248, panic_on_248); idt.set_entry_new(249, panic_on_249); idt.set_entry_new(250, panic_on_250); idt.set_entry_new(251, panic_on_251); idt.set_entry_new(252, panic_on_252); idt.set_entry_new(253, panic_on_253); idt.set_entry_new(254, panic_on_254); idt.set_entry_new(255, panic_on_255); idt } fn set_entry_new(&mut self, idx: usize, handler: HandlerWithError) { self.get_entry_mut::<HandlerWithError>(idx).set(handler, GateType::INTERRUPT, DPL::PRIVILEGE0, 0x20); } } impl IDT { pub unsafe fn set_for_this_cpu(idt: Box<IDT>) -> *mut IDT { let mut old_reg: IDTRegister = IDTRegister { size: 0, addr: 0 as _ }; asm!("sidt ($0)" : "=r"(&mut old_reg)); let new_idt = IDTRegister { size: core::mem::size_of::<IDT>().try_into().unwrap(), addr: Box::into_raw(idt) }; asm!("lidt ($0)" :: "r"(&new_idt) :: "volatile"); old_reg.addr } fn get_entry<T>(&self, idx: usize) -> &Entry<T> { unsafe { transmute(&self.entries[idx]) } } fn get_entry_mut<T>(&mut self, idx: usize) -> &mut Entry<T> { unsafe { transmute(&mut self.entries[idx]) } } } impl Index<Vector> for IDT { type 
Output = Entry<Handler>; fn index(&self, int: Vector) -> &Self::Output { self.get_entry(usize::from(int)) } } impl IndexMut<Vector> for IDT { fn index_mut(&mut self, int: Vector) -> &mut Self::Output { self.get_entry_mut(usize::from(int)) } } impl Index<VectorWithError> for IDT { type Output = Entry<HandlerWithError>; fn index(&self, int: VectorWithError) -> &Self::Output { self.get_entry(usize::from(int)) } } impl IndexMut<VectorWithError> for IDT { fn index_mut(&mut self, int: VectorWithError) -> &mut Self::Output { self.get_entry_mut(usize::from(int)) } } impl Index<usize> for IDT { type Output = Entry<Handler>; fn index(&self, int: usize) -> &Self::Output { if int > 31 { self.get_entry(usize::from(int)) } else { panic!("All interrupts below 32 are reserved and can't be assigned manually. Panicking."); } } } impl IndexMut<usize> for IDT { fn index_mut(&mut self, int: usize) -> &mut Self::Output { if int > 31 { self.get_entry_mut(usize::from(int)) } else { panic!("All interrupts below 32 are reserved and can't be assigned manually. Panicking."); } } } #[repr(packed)] struct IDTRegister { size: u16, addr: *mut IDT }
38.931694
109
0.650221
dec477dc82944d272acd8fc96e72b8a73de9c734
1,081
use yew::prelude::*; fn compile_fail() { html! { <div> }; html! { <div><div> }; html! { </div> }; html! { <div><div></div> }; html! { <div></div><div></div> }; html! { <div></span> }; html! { <tag-a></tag-b> }; html! { <div></span></div> }; html! { <img /></img> }; html! { <div>Invalid</div> }; html! { <input attr=1 attr=2 /> }; html! { <input value="123" value="456" /> }; html! { <input kind="checkbox" kind="submit" /> }; html! { <input checked=true checked=false /> }; html! { <input disabled=true disabled=false /> }; html! { <option selected=true selected=false /> }; html! { <div class="first" class="second" /> }; html! { <input checked=1 /> }; html! { <input disabled=1 /> }; html! { <option selected=1 /> }; html! { <input type=() /> }; html! { <input value=() /> }; html! { <a href=() /> }; html! { <input onclick=1 /> }; html! { <input onclick=|| () /> }; html! { <input onclick=|a, b| () /> }; html! { <input onclick=|a: String| () /> }; } fn main() {}
29.216216
54
0.479186
e9576443274e422a0d13a668cab663e7365a1f0a
2,594
use fungus::prelude::*; use serde::{Deserialize, Serialize}; use std::{convert::From, fmt}; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub enum Component { All, Config, None, } impl Component { /// Return a vector of all Component values except Component::All and Component::None. pub fn all() -> Vec<Component> { vec![Component::Config] } /// Return a String of all the given components joined with ', ' pub fn join<T: AsRef<[Component]>>(components: T) -> String { let result: Vec<String> = components.as_ref().iter().map(|x| x.to_string()).collect(); result.join(", ") } } impl From<&str> for Component { fn from(val: &str) -> Self { match val.to_lowercase().as_ref() { "all" => Component::All, "config" => Component::Config, _ => Component::None, } } } impl From<String> for Component { fn from(val: String) -> Self { Component::from(val.as_str()) } } // Implement format! support impl fmt::Display for Component { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", format!("{:?}", self).to_lowercase()) } } #[cfg(test)] mod tests { use crate::prelude::*; #[test] fn test_errors() { assert_eq!( format!("{}", ComponentError::InvalidName), "invalid component name was given" ); } #[test] fn test_all() { assert_eq!(Component::all(), vec![Component::Config,]); } #[test] fn test_join() { assert_eq!(Component::join(vec![Component::Config,]), "config"); } #[test] fn test_component_from() { // str assert_eq!(Component::from("All"), Component::All); assert_eq!(Component::from("conFig"), Component::Config); assert_eq!(Component::from("foo"), Component::None); // String assert_eq!(Component::from("All".to_string()), Component::All); assert_eq!(Component::from("confIG".to_string()), Component::Config); assert_eq!(Component::from("foo".to_string()), Component::None); assert_eq!(Component::All.to_string(), "all"); assert_eq!(Component::Config.to_string(), "config"); assert_eq!(Component::None.to_string(), "none"); } #[test] fn test_debug_string() { assert_eq!(format!("{}", Component::All), "all"); assert_eq!(format!("{}", Component::Config), "config"); assert_eq!(format!("{}", Component::None), "none"); } }
27.020833
94
0.566307
bfed7941ca61ad2065b066f58f7be7c269f96db4
5,216
#[macro_use] extern crate quote; extern crate syn; use syn::{Attribute, Item, ItemFn, ItemMod, Lit, Meta, MetaNameValue}; use std::env; use std::io::{Read, Write}; use std::fs::File; use std::path::{Path, PathBuf}; /// Look for a simple attribute matching a string fn any_attr_is(attrs: &[Attribute], ident: &str) -> bool { attrs.iter().any(|a| match a.interpret_meta() { Some(Meta::Word(i)) if i == ident => true, _ => false }) } /// Parse a list of items for #[register]ed functions (recurses into modules) /// /// mod_path: parent dir of the mod we are parsing /// items: list of items in the current mod /// /// Returns a list of item paths (relative to the current module) fn parse(mod_path: PathBuf, items: Vec<Item>) -> Vec<syn::Path> { let mut names = vec![]; for item in items { match item { // handle a registered function Item::Fn(ItemFn { ref attrs, ident, .. }) if any_attr_is(attrs, "register") => { names.push(ident.into()); } // handle a module Item::Mod(module) => { let (the_path, the_items, the_ident); // what kind of module is it? match module { // inline module! ItemMod { content: Some((_, items)), ident, .. } => { the_items = items; the_ident = ident; the_path = mod_path.clone(); } // non-inline module! ItemMod { attrs, ident, .. } => { // read the #[path] attr if present let mut path = None; for attr in attrs { match attr.interpret_meta() { Some(Meta::NameValue(MetaNameValue { ident, lit: Lit::Str(ref s), .. })) if ident == "path" => { path = Some(s.value()); } _ => {} } } // read in the module contents from file, wherever it is let mut content = String::new(); let mut file = match path { // from a path attribute Some(p) => { the_path = Path::new(&p).parent().unwrap().to_owned(); File::open(&p).expect(&p) } // no path attribute -- try $name.rs and $name/mod.rs None => { match File::open(mod_path.join(format!("{}.rs", ident))) { Ok(file) => { the_path = mod_path.clone(); file } Err(_) => { the_path = mod_path.join(ident.as_ref()); File::open(mod_path.join(ident.as_ref()).join("mod.rs")).expect(&format!("{}/{}/mod.rs", mod_path.display(), ident)) } } } }; file.read_to_string(&mut content).unwrap(); the_items = syn::parse_file(&content).unwrap().items; the_ident = ident; } } // recurse to find registered functions within the new module names.extend( parse(the_path, the_items) .into_iter() .map(|mut p| { // prepend the module path to the found items p.segments.insert(0, the_ident.into()); p }) ); } _ => {} } } names } /// Find registered functions in the given crate. Call this in your build script! /// /// root: path to the crate root (e.g. src/main.rs or src/lib.rs) pub fn go<P: AsRef<Path>>(root: P) { let root = root.as_ref(); let outfile = Path::new(&env::var("OUT_DIR").unwrap()).join("macbuild.rs"); // Exfiltrate the name of the generated file so that macbuild!() can include it println!("cargo:rustc-env=MACBUILD={}", outfile.display()); // Get registered functions from the crate let mut content = String::new(); File::open(root).unwrap().read_to_string(&mut content).unwrap(); let ast = syn::parse_file(&content).unwrap(); let names = parse(root.parent().unwrap().to_owned(), ast.items); // Generate bootstrap function let mut out = File::create(outfile).unwrap(); writeln!(out, "{}", quote! { pub fn bootstrap() { #(::#names();)* } }).unwrap(); }
38.925373
156
0.42408
642e38186cffa58f1916ef632e52c82620af087f
2,788
//! This example demonstrates how to use the ExtiPin trait for a GPIO input pin to capture a //! push button press. This example was written for the Nucleo-F767ZI board from the Nucleo-144 //! family of boards, targeting the STM32F767ZI microcontroller. To port this to another board, //! change the GPIOs used for the push button and for the debug LED. Note that the EXTI number //! may change if using a new button, meaning that the interrupt handler will need to change also. //! //! The intended behavior of the example is that when the user presses the button, an LED is //! toggled. #![no_main] #![no_std] extern crate panic_halt; use core::cell::{Cell, RefCell}; use cortex_m::interrupt::{free, Mutex}; use cortex_m::peripheral::NVIC; use cortex_m_rt::entry; use stm32f7xx_hal::gpio::gpioc::PC13; use stm32f7xx_hal::gpio::{Edge, ExtiPin, Floating, Input}; use stm32f7xx_hal::{interrupt, pac, prelude::*}; // Semaphore for synchronization static SEMAPHORE: Mutex<Cell<bool>> = Mutex::new(Cell::new(true)); // GPIO pin that main thread and interrupt handler must share static BUTTON_PIN: Mutex<RefCell<Option<PC13<Input<Floating>>>>> = Mutex::new(RefCell::new(None)); #[entry] fn main() -> ! { let pac_periph = pac::Peripherals::take().unwrap(); // Debug LED configuration let gpiob = pac_periph.GPIOB.split(); let mut led1 = gpiob.pb0.into_push_pull_output(); // Push button configuration let mut rcc = pac_periph.RCC; let mut syscfg = pac_periph.SYSCFG; let mut exti = pac_periph.EXTI; let gpioc = pac_periph.GPIOC.split(); let mut button = gpioc.pc13.into_floating_input(); button.make_interrupt_source(&mut syscfg, &mut rcc); button.trigger_on_edge(&mut exti, Edge::Rising); button.enable_interrupt(&mut exti); // Freeze clocks rcc.constrain().cfgr.sysclk(216_000_000.Hz()).freeze(); // Save information needed by the interrupt handler to the global variable free(|cs| { BUTTON_PIN.borrow(cs).replace(Some(button)); }); // Enable the button interrupt unsafe { NVIC::unmask::<interrupt>(interrupt::EXTI15_10); } loop { // Wait for the interrupt to fire free(|cs| { if SEMAPHORE.borrow(cs).get() == false { // Toggle debug LED led1.toggle(); SEMAPHORE.borrow(cs).set(true); } }); } } #[interrupt] fn EXTI15_10() { free(|cs| { match BUTTON_PIN.borrow(cs).borrow_mut().as_mut() { // Clear the push button interrupt Some(b) => b.clear_interrupt_pending_bit(), // This should never happen None => (), } // Signal that the interrupt fired SEMAPHORE.borrow(cs).set(false); }); }
31.681818
98
0.65495
56e16d01b381592e0a17355ee9ec35b3bfed881a
5,091
#![cfg_attr(not(feature = "std"), no_std)] use frame_support::traits::{Currency, ExistenceRequirement}; pub use pallet::*; pub use crate::weights::WeightInfo; use gafi_primitives::cache::Cache; #[cfg(test)] mod mock; #[cfg(test)] mod tests; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; pub mod weights; pub use weights::*; #[frame_support::pallet] pub mod pallet { use super::*; use frame_support::{pallet_prelude::*, BoundedVec}; use frame_system::pallet_prelude::*; pub type BalanceOf<T> = <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance; pub type AccountOf<T> = <T as frame_system::Config>::AccountId; #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>; /// The currency mechanism. type Currency: Currency<Self::AccountId>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; /// Number of accounts that will send the tokens for the user. #[pallet::constant] type MaxGenesisAccount: Get<u32>; /// Cache used to remember recent faucet payouts per account. type Cache: Cache<Self::AccountId, AccountOf<Self>, u128>; } #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] pub struct Pallet<T>(_); /// Holding all the accounts #[pallet::storage] pub(super) type GenesisAccounts<T: Config> = StorageValue<_, BoundedVec<T::AccountId, T::MaxGenesisAccount>, ValueQuery>; #[pallet::storage] pub type FaucetAmount<T: Config> = StorageValue<_, BalanceOf<T>, ValueQuery>; //** Genesis Configuration **// #[pallet::genesis_config] pub struct GenesisConfig<T: Config> { pub genesis_accounts: Vec<T::AccountId>, pub faucet_amount: BalanceOf<T>, } #[cfg(feature = "std")] impl<T: Config> Default for GenesisConfig<T> { fn default() -> Self { Self { genesis_accounts: vec![], faucet_amount: BalanceOf::<T>::default()} } } #[pallet::genesis_build] impl<T: Config> GenesisBuild<T> for GenesisConfig<T> { fn build(&self) { for i in 0..self.genesis_accounts.len() { <GenesisAccounts<T>>::try_append(self.genesis_accounts[i].clone()) .map_or((), |_| {}); } FaucetAmount::<T>::put(self.faucet_amount); } } #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event<T: Config> { Transferred(T::AccountId, T::AccountId, BalanceOf<T>), } #[pallet::error] pub enum Error<T> { TransferToSelf, NotEnoughBalance, DontBeGreedy, PleaseWait } #[pallet::call] impl<T: Config> Pallet<T> { /// faucet /// /// The origin must be Signed /// /// Weight: `O(1)` #[pallet::weight(( 0, DispatchClass::Normal, Pays::No ))] pub fn faucet(origin: OriginFor<T>) -> DispatchResult { let sender = ensure_signed(origin)?; let genesis_accounts = GenesisAccounts::<T>::get(); let faucet_amount = FaucetAmount::<T>::get(); ensure!(Self::get_cache(&sender) == None, <Error<T>>::PleaseWait); ensure!( T::Currency::free_balance(&sender) < (faucet_amount / 10u128.try_into().ok().unwrap()), <Error<T>>::DontBeGreedy ); for account in genesis_accounts { match T::Currency::transfer( &account, &sender, faucet_amount, ExistenceRequirement::KeepAlive, ) { Ok(_) => { Self::insert_cache(sender, faucet_amount); return Ok(()) }, Err(_) => continue, } } Err(DispatchError::Other("Out of Faucet")) } /// donate /// /// The origin must be Signed /// /// Parameters: /// - `amount`: donation amount /// /// Weight: `O(1)` #[pallet::weight(<T as pallet::Config>::WeightInfo::donate(50u32))] pub fn donate( origin: OriginFor<T>, amount: BalanceOf<T>, ) -> DispatchResult { let from = ensure_signed(origin)?;
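// The donation is forwarded to the first genesis account; reject donors who cannot cover the amount or who are that account themselves.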
ensure!(T::Currency::free_balance(&from) > amount, <Error<T>>::NotEnoughBalance); let genesis_accounts = GenesisAccounts::<T>::get(); ensure!(genesis_accounts[0] != from, <Error<T>>::TransferToSelf); T::Currency::transfer( &from, &genesis_accounts[0], amount, ExistenceRequirement::KeepAlive, )?; Self::deposit_event(Event::Transferred(from, genesis_accounts[0].clone(), amount)); Ok(()) } } impl<T: Config> Pallet<T> { fn insert_cache(sender: T::AccountId, faucet_amount: BalanceOf<T>)-> Option<()> { match faucet_amount.try_into(){ Ok(value) => Some(T::Cache::insert(&sender, sender.clone(), value)), Err(_) => None, } } fn get_cache(sender: &T::AccountId) -> Option<u128> { if let Some(faucet_cache) = T::Cache::get(&sender, sender.clone()) { return Some(faucet_cache); } None } } } #[cfg(feature = "std")] impl<T: Config> GenesisConfig<T> { pub fn build_storage(&self) -> Result<sp_runtime::Storage, String> { <Self as frame_support::pallet_prelude::GenesisBuild<T>>::build_storage(self) } pub fn assimilate_storage(&self, storage: &mut sp_runtime::Storage) -> Result<(), String> { <Self as frame_support::pallet_prelude::GenesisBuild<T>>::assimilate_storage(self, storage) } }
25.455
93
0.659006
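A minimal, framework-free sketch of the faucet's gating logic in the pallet above, assuming a plain in-memory map stands in for `T::Cache` and for account balances; `FaucetState`, `drip`, and the string errors are hypothetical illustrations, not pallet API.

use std::collections::HashMap;

struct FaucetState {
    faucet_amount: u128,
    cache: HashMap<String, u128>,    // stand-in for `T::Cache`
    balances: HashMap<String, u128>, // stand-in for `T::Currency`
}

impl FaucetState {
    fn drip(&mut self, who: &str) -> Result<(), &'static str> {
        // Mirrors `ensure!(Self::get_cache(&sender).is_none(), PleaseWait)`.
        if self.cache.contains_key(who) {
            return Err("PleaseWait");
        }
        // Mirrors the `free_balance < faucet_amount / 10` (DontBeGreedy) guard.
        let balance = self.balances.get(who).copied().unwrap_or(0);
        if balance >= self.faucet_amount / 10 {
            return Err("DontBeGreedy");
        }
        *self.balances.entry(who.to_string()).or_insert(0) += self.faucet_amount;
        self.cache.insert(who.to_string(), self.faucet_amount);
        Ok(())
    }
}

fn main() {
    let mut faucet = FaucetState {
        faucet_amount: 1_000,
        cache: HashMap::new(),
        balances: HashMap::new(),
    };
    assert!(faucet.drip("alice").is_ok());
    // A second request is rate-limited by the cache entry.
    assert_eq!(faucet.drip("alice"), Err("PleaseWait"));
}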
4bf8fb704275d53309627e3b283e1878bd51e32a
1,212
// Copyright 2019, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>

use gdk_sys;
use glib::translate::*;
use std::mem;
use std::ptr;
use Keymap;
use KeymapKey;

impl Keymap {
    pub fn get_entries_for_keycode(
        &self,
        hardware_keycode: u32,
    ) -> Option<(Vec<KeymapKey>, Vec<u32>)> {
        unsafe {
            let mut keys: *mut gdk_sys::GdkKeymapKey = ptr::null_mut();
            let mut keyvals = ptr::null_mut();
            // Use MaybeUninit rather than the deprecated (and unsound)
            // mem::uninitialized() for the out-parameter.
            let mut n_entries = mem::MaybeUninit::uninit();
            let ret = from_glib(gdk_sys::gdk_keymap_get_entries_for_keycode(
                self.to_glib_none().0,
                hardware_keycode,
                &mut keys,
                &mut keyvals,
                n_entries.as_mut_ptr(),
            ));
            if ret {
                let n_entries = n_entries.assume_init();
                Some((
                    FromGlibContainer::from_glib_full_num(keys, n_entries as usize),
                    FromGlibContainer::from_glib_full_num(keyvals, n_entries as usize),
                ))
            } else {
                None
            }
        }
    }
}
31.076923
95
0.554455
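The binding above reads `n_entries` through an out-parameter. Below is a minimal sketch of that FFI pattern, assuming a stand-in function `fill_count` in place of the real `gdk_sys` symbol; `MaybeUninit` is the sound replacement for the deprecated `mem::uninitialized()`.

use std::mem::MaybeUninit;
use std::os::raw::c_int;

// Stand-in for a C function that writes its result through an out pointer.
fn fill_count(out: *mut c_int) {
    unsafe { *out = 42 };
}

fn main() {
    let mut n_entries = MaybeUninit::<c_int>::uninit();
    fill_count(n_entries.as_mut_ptr());
    // Only read the value once the callee has initialised it.
    let n_entries = unsafe { n_entries.assume_init() };
    assert_eq!(n_entries, 42);
}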
878a4549f74395ea15bea4057da10558b1ca3d30
1,104
const H: i8 = 4;
const W: i8 = 4;
const END: (i8, i8) = (W - 1, H - 1);

fn longest(code: &mut String, path: &mut String, x: i8, y: i8) -> Option<usize> {
    let len = code.len();
    code.extend(path.chars());
    let hash = md5::compute(&*code);
    code.truncate(len);
    [
        (0, -1, 'U'),
        (0, 1, 'D'),
        (-1, 0, 'L'),
        (1, 0, 'R'),
    ].iter()
        // Split each hash byte into its two nibbles; the first four nibbles
        // gate the U, D, L and R doors (open for values b..f).
        .zip(hash.iter().flat_map(|&x| [x >> 4, x & 0xF]))
        .filter_map(|(&(dx, dy, dir), status)| {
            let x = x + dx;
            let y = y + dy;
            if status < 0xB || x < 0 || y < 0 || x >= W || y >= H {
                None
            } else if (x, y) == END {
                Some(path.len() + 1)
            } else {
                path.push(dir);
                let this_longest = longest(code, path, x, y);
                path.pop();
                this_longest
            }
        }).max()
}

fn main() {
    let mut input = String::new();
    std::io::stdin().read_line(&mut input).unwrap();
    // read_line keeps the trailing newline, which would corrupt every hash.
    let mut code = input.trim().to_string();
    let mut path = String::new();
    println!("{}", longest(&mut code, &mut path, 0, 0).unwrap());
}
28.307692
102
0.445652
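A small sketch of the door test the solution above performs at every step: the first four hex digits of md5(passcode + path) gate the U, D, L and R doors, open for digits b..f (values >= 0xB). It assumes the same `md5` crate; `open_doors` is a hypothetical helper, and the passcode is the puzzle's worked example.

fn open_doors(passcode: &str, path: &str) -> [bool; 4] {
    let hash = md5::compute(format!("{}{}", passcode, path));
    // The first two digest bytes hold the four relevant nibbles, high nibble first.
    [
        (hash[0] >> 4) >= 0xB,  // U
        (hash[0] & 0xF) >= 0xB, // D
        (hash[1] >> 4) >= 0xB,  // L
        (hash[1] & 0xF) >= 0xB, // R
    ]
}

fn main() {
    // "hijkl" is the puzzle's worked example: U, D and L start open, R closed.
    let [u, d, l, r] = open_doors("hijkl", "");
    println!("U:{} D:{} L:{} R:{}", u, d, l, r);
}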
d5a316f6143e8188b4586f07c62930fe0f11b0b7
2,850
use rand::prelude::*; use std::cmp::{max, min}; use std::collections::BTreeSet; use filters::Filter; use images::Image; use Geometry; pub struct Cow { min_radius: u32, max_radius: u32, n: u32, allow_duplicates: bool, geometry: Option<Geometry>, } impl Cow { pub fn new() -> Cow { Cow { min_radius: 10, max_radius: 20, n: 3, allow_duplicates: true, geometry: None, } } pub fn circles(self, n: u32) -> Self { Cow { n: n, ..self } } pub fn min_radius(self, min_radius: u32) -> Self { Cow { min_radius: min_radius, ..self } } pub fn max_radius(self, max_radius: u32) -> Self { Cow { max_radius: max_radius, ..self } } // right + bottom = inclusive pub fn area(self, g: Geometry) -> Self { Cow { geometry: Some(g), ..self } } fn get_pixels(x: i32, y: i32, r: i32, i: &mut Image) -> Vec<(u32, u32)> { let h = i.height() as i32; let w = i.width() as i32; let mut v = vec![]; for py in max(y - r, 0)..min(y + r, h) { for px in max(x - r, 0)..min(x + r, w) { let dy = y - py; let dx = x - px; let d = ((dy * dy + dx * dx) as f32).sqrt() as i32; if d <= r { v.push((px as u32, py as u32)); } } } v } fn invert_pixels(v: &[(u32, u32)], i: &mut Image) { for &(x, y) in v { let mut p = i.get_pixel(x, y); p.invert(); i.put_pixel(x as u32, y as u32, p); } } } impl Filter for Cow { fn apply(&self, i: &mut Image) { let mut rng = thread_rng(); let g = match self.geometry { Some(ref x) => x.clone(), None => Geometry::new(0, i.width() - 1, 0, i.height() - 1), }; let mut pixels = vec![( rng.gen_range(g.left, g.right + 1), rng.gen_range(g.top, g.bottom + 1), )]; let mut set = BTreeSet::new(); for _ in 0..self.n { let mut rng = thread_rng(); let p = pixels.choose(&mut rng).expect("failed").clone(); let r = rng.gen_range(self.min_radius, self.max_radius + 1) as i32; let v = Self::get_pixels(p.0 as i32, p.1 as i32, r, i); if self.allow_duplicates { pixels.extend(&v); } else { for p in v { if !set.contains(&p) { pixels.push(p); set.insert(p); } } } } Self::invert_pixels(&pixels, i); } }
24.152542
79
0.428772
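A dependency-free sketch of the disc rasterisation inside `get_pixels` above: scan the circle's bounding box, clamped to the image, and keep points whose truncated Euclidean distance is within the radius. `points_in_circle` is a hypothetical name for illustration.

use std::cmp::{max, min};

fn points_in_circle(x: i32, y: i32, r: i32, w: i32, h: i32) -> Vec<(u32, u32)> {
    let mut v = Vec::new();
    // Scan only the bounding box of the circle, clamped to the image.
    for py in max(y - r, 0)..min(y + r, h) {
        for px in max(x - r, 0)..min(x + r, w) {
            let (dx, dy) = (x - px, y - py);
            // Integer truncation of the Euclidean distance, as in the original.
            if ((dx * dx + dy * dy) as f32).sqrt() as i32 <= r {
                v.push((px as u32, py as u32));
            }
        }
    }
    v
}

fn main() {
    // A radius-1 disc centred in a 3x3 image covers at least the centre cross.
    let pts = points_in_circle(1, 1, 1, 3, 3);
    assert!(pts.contains(&(1, 1)) && pts.contains(&(0, 1)) && pts.contains(&(1, 0)));
}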
394604831ba106834c98f161f1d9f11530675fa7
6,433
//! Components for navigating file systems.
use crate::dsp::Samples;
use crate::io::{audio, path};
use crate::ui;
use crate::view::View;
use crossterm::event::{KeyCode, KeyEvent};
use std::borrow::ToOwned;
use std::path::PathBuf;
use tui::backend::Backend;
use tui::layout::Rect;
use tui::style::{Modifier, Style};
use tui::terminal::Frame;
use tui::text::Text;
use tui::widgets::{Block, Borders, Clear, List, ListItem, ListState, Paragraph};

/// A UI view for navigating the file system, reading audio files, and writing audio files.
pub struct File {
    cwd: PathBuf,
    files: Vec<(String, bool)>,
    mode: Mode,
    state: ListState,
    type_buffer: String,
}

impl File {
    /// Attempt to create a File view.
    ///
    /// # Errors
    ///
    /// Will return `Err` if `path` does not exist or contains invalid audio data.
    pub fn try_new(cwd: PathBuf) -> eyre::Result<Self> {
        let files = path::sorted_names(&cwd)?;
        Ok(Self {
            cwd,
            files,
            mode: Mode::Navigate,
            state: ListState::default(),
            type_buffer: String::new(),
        })
    }

    /// Change working directory and load its files.
    fn chdir(&mut self, cwd: PathBuf) {
        self.cwd = cwd;
        self.files = path::sorted_names(&self.cwd)
            .unwrap_or_else(|error| vec![(format!("{}", error), false)]);
        self.mode = Mode::Navigate;
        self.state = ListState::default();
    }

    /// Handle key events while in navigate mode.
    fn key_event_navigate(&mut self, event: KeyEvent) {
        match event.code {
            KeyCode::Char('w') => self.mode = Mode::Type,
            KeyCode::Down => self.next(),
            KeyCode::Enter => {
                if let Some(index) = self.state.selected() {
                    let (_name, is_dir) = &self.files[index];
                    if !*is_dir {
                        self.mode = Mode::Read;
                    }
                };
            }
            KeyCode::Left => {
                let option = self.cwd.parent().map(ToOwned::to_owned);
                if let Some(path_ref) = option {
                    self.chdir(path_ref)
                }
            }
            KeyCode::Right => {
                if let Some(index) = self.state.selected() {
                    let (name, is_dir) = &self.files[index];
                    if *is_dir {
                        let path = self.cwd.join(name);
                        self.chdir(path);
                    }
                };
            }
            KeyCode::Up => self.previous(),
            _ => (),
        }
    }

    /// Handle key events while in type mode.
    fn key_event_type(&mut self, event: KeyEvent) {
        match event.code {
            KeyCode::Backspace | KeyCode::Delete => {
                self.type_buffer.pop();
            }
            KeyCode::Enter => self.mode = Mode::Write,
            KeyCode::Char(char) => {
                self.type_buffer.push(char);
            }
            _ => (),
        }
    }

    /// Move the list selection to the next entry, wrapping around.
    fn next(&mut self) {
        let index = match self.state.selected() {
            Some(index) => {
                if index >= self.files.len() - 1 {
                    0
                } else {
                    index + 1
                }
            }
            None => 0,
        };
        self.state.select(Some(index));
    }

    /// Move the list selection to the previous entry, wrapping around.
    fn previous(&mut self) {
        let index = match self.state.selected() {
            Some(index) => {
                if index == 0 {
                    self.files.len() - 1
                } else {
                    index - 1
                }
            }
            None => 0,
        };
        self.state.select(Some(index));
    }
}

impl<B: Backend> View<B> for File {
    fn key_event(&mut self, event: KeyEvent) {
        match self.mode {
            Mode::Navigate => self.key_event_navigate(event),
            Mode::Type => self.key_event_type(event),
            _ => (),
        }
    }

    fn process(&mut self, samples: &mut Samples) {
        match self.mode {
            Mode::Read => {
                if let Some(index) = self.state.selected() {
                    let (name, _is_dir) = &self.files[index];
                    let path = self.cwd.join(name);
                    match audio::read_samples(&path) {
                        Ok(buffer) => *samples = buffer,
                        Err(error) => self.files = vec![(format!("{}", error), false)],
                    };
                };
                self.mode = Mode::Navigate;
            }
            Mode::Write => {
                let path = self.cwd.join(&self.type_buffer);
                if let Err(error) = audio::write_samples(&path, samples) {
                    self.files = vec![(format!("{}", error), false)];
                };
                self.type_buffer.clear();
                self.chdir(self.cwd.clone());
                self.mode = Mode::Navigate;
            }
            _ => (),
        }
    }

    fn render<'b>(&mut self, frame: &mut Frame<'b, B>, area: Rect) {
        let entries: Vec<ListItem> = self
            .files
            .iter()
            .map(|(file, is_dir)| {
                let item = ListItem::new(file.as_ref());
                if *is_dir {
                    item.style(Style::default().add_modifier(Modifier::BOLD))
                } else {
                    item
                }
            })
            .collect();
        let block = Block::default().borders(Borders::ALL);
        let list = List::new(entries)
            .block(block)
            .highlight_style(Style::default().add_modifier(Modifier::ITALIC))
            .highlight_symbol("> ");
        frame.render_stateful_widget(list, area, &mut self.state);
        if self.mode == Mode::Type {
            let area = ui::util::centered_rectangle(60, 20, area);
            frame.render_widget(Clear, area);
            let block = Block::default().title("Write").borders(Borders::ALL);
            let text = Text::from(self.type_buffer.as_ref());
            let line = Paragraph::new(text).block(block);
            frame.render_widget(line, area);
        }
    }
}

#[derive(Debug, Eq, PartialEq)]
enum Mode {
    Read,
    Navigate,
    Type,
    Write,
}

#[cfg(test)]
mod tests {}
29.374429
91
0.472097
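A tiny sketch of the wrap-around selection arithmetic that `next` and `previous` above implement, pulled out as pure functions; `wrap_next` and `wrap_prev` are hypothetical names, not part of the tui `ListState` API, and a non-empty list is assumed (as in the original).

fn wrap_next(selected: Option<usize>, len: usize) -> usize {
    match selected {
        Some(i) if i >= len - 1 => 0, // Past the last entry: wrap to the top.
        Some(i) => i + 1,
        None => 0, // Nothing selected yet: start at the first entry.
    }
}

fn wrap_prev(selected: Option<usize>, len: usize) -> usize {
    match selected {
        Some(0) => len - 1, // Before the first entry: wrap to the bottom.
        Some(i) => i - 1,
        None => 0,
    }
}

fn main() {
    assert_eq!(wrap_next(Some(2), 3), 0);
    assert_eq!(wrap_prev(Some(0), 3), 2);
    assert_eq!(wrap_next(None, 3), 0);
}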
1a1d9eebb1a45f5b3746a637fba22e4e4077acc6
71,710
use std::collections::HashMap; use std::iter::FromIterator; use crate::{ parse_tree::{AsmOp, AsmRegister, LazyOp, Literal, Visibility}, semantic_analysis::{ast_node::TypedCodeBlock, ast_node::*, *}, type_engine::*, }; use sway_types::{ident::Ident, span::Span}; use sway_ir::*; // ------------------------------------------------------------------------------------------------- // XXX This needs to return a CompileResult. OTOH, retrofitting a CompileResult here would add // very little value and require a lot of work. An alternative might be returning // Result<T, CompileError>. pub(crate) fn compile_ast(ast: TypedParseTree) -> Result<Context, String> { let mut ctx = Context::default(); match ast { TypedParseTree::Script { namespace, main_function, declarations, all_nodes: _, } => compile_script(&mut ctx, main_function, namespace, declarations), TypedParseTree::Predicate { namespace: _, main_function: _, declarations: _, all_nodes: _, } => unimplemented!("compile predicate to ir"), TypedParseTree::Contract { abi_entries, namespace, declarations, all_nodes: _, } => compile_contract(&mut ctx, abi_entries, namespace, declarations), TypedParseTree::Library { namespace: _, all_nodes: _, } => unimplemented!("compile library to ir"), }?; ctx.verify().map_err(|ir_error| ir_error.to_string())?; Ok(ctx) } // ------------------------------------------------------------------------------------------------- fn compile_script( context: &mut Context, main_function: TypedFunctionDeclaration, namespace: NamespaceRef, declarations: Vec<TypedDeclaration>, ) -> Result<Module, String> { let module = Module::new(context, Kind::Script); let mut struct_names = StructSymbolMap::default(); compile_constants(context, module, namespace, false)?; compile_declarations(context, module, &mut struct_names, declarations)?; compile_function(context, module, &mut struct_names, main_function)?; Ok(module) } fn compile_contract( context: &mut Context, abi_entries: Vec<TypedFunctionDeclaration>, namespace: NamespaceRef, declarations: Vec<TypedDeclaration>, ) -> Result<Module, String> { let module = Module::new(context, Kind::Contract); let mut struct_names = StructSymbolMap::default(); compile_constants(context, module, namespace, false)?; compile_declarations(context, module, &mut struct_names, declarations)?; for decl in abi_entries { compile_abi_method(context, module, &mut struct_names, decl)?; } Ok(module) } // ------------------------------------------------------------------------------------------------- fn compile_constants( context: &mut Context, module: Module, namespace: NamespaceRef, public_only: bool, ) -> Result<(), String> { read_module( |ns| -> Result<(), String> { for decl in ns.get_all_declared_symbols() { let decl_name_value = match decl { TypedDeclaration::ConstantDeclaration(TypedConstantDeclaration { name, value, visibility, }) => { // XXX Do we really only add public constants? if !public_only || matches!(visibility, Visibility::Public) { Some((name, value)) } else { None } } TypedDeclaration::VariableDeclaration(TypedVariableDeclaration { name, body, const_decl_origin, .. 
}) if *const_decl_origin => Some((name, body)), _otherwise => None, }; if let Some((name, value)) = decl_name_value { let const_val = compile_constant_expression(context, value)?; module.add_global_constant(context, name.as_str().to_owned(), const_val); } } for ns_ix in ns.get_all_imported_modules().filter(|x| **x != namespace) { compile_constants(context, module, *ns_ix, true)?; } Ok(()) }, namespace, )?; Ok(()) } fn compile_constant_expression( context: &mut Context, const_expr: &TypedExpression, ) -> Result<Value, String> { if let TypedExpressionVariant::Literal(literal) = &const_expr.expression { let span_md_idx = MetadataIndex::from_span(context, &const_expr.span); Ok(convert_literal_to_value(context, literal, span_md_idx)) } else { Err("Unsupported constant expression type.".into()) } } // ------------------------------------------------------------------------------------------------- // We don't really need to compile these declarations other than `const`s since: // a) function decls are inlined into their call site and can be (re)created there, though ideally // we'd give them their proper name by compiling them here. // b) struct decls are also inlined at their instantiation site. // c) ditto for enums. // // And for structs and enums in particular, we must ignore those with embedded generic types as // they are monomorphised only at the instantation site. We must ignore the generic declarations // altogether anyway. fn compile_declarations( context: &mut Context, module: Module, _struct_names: &mut StructSymbolMap, declarations: Vec<TypedDeclaration>, ) -> Result<(), String> { for declaration in declarations { match declaration { TypedDeclaration::ConstantDeclaration(decl) => { // These are in the global scope for the module, so they can be added there. let const_val = compile_constant_expression(context, &decl.value)?; module.add_global_constant(context, decl.name.as_str().to_owned(), const_val); } TypedDeclaration::FunctionDeclaration(_decl) => { // We no longer compile functions other than `main()` until we can improve the name // resolution. Currently there isn't enough information in the AST to fully // distinguish similarly named functions and especially trait methods. // //compile_function(context, module, struct_names, decl).map(|_| ())? } TypedDeclaration::ImplTrait { methods: _, type_implementing_for: _, .. } => { // And for the same reason we don't need to compile impls at all. // // compile_impl( // context, // module, // struct_names, // type_implementing_for, // methods, //)?, } TypedDeclaration::StructDeclaration(_) | TypedDeclaration::TraitDeclaration(_) | TypedDeclaration::EnumDeclaration(_) | TypedDeclaration::VariableDeclaration(_) | TypedDeclaration::Reassignment(_) | TypedDeclaration::AbiDeclaration(_) | TypedDeclaration::GenericTypeForFunctionScope { .. 
} | TypedDeclaration::ErrorRecovery => (), } } Ok(()) } // ------------------------------------------------------------------------------------------------- #[derive(Clone, Default)] struct StructSymbolMap { aggregate_names: HashMap<String, Aggregate>, aggregate_symbols: HashMap<Aggregate, HashMap<String, u64>>, } impl StructSymbolMap { pub fn add_aggregate_symbols( &mut self, name: String, aggregate: Aggregate, symbols: Option<HashMap<String, u64>>, ) -> Result<(), String> { match self.aggregate_names.insert(name, aggregate) { None => Ok(()), Some(_) => Err("Aggregate symbols were overwritten/shadowed.".to_owned()), }?; symbols .map( |symbols| match self.aggregate_symbols.insert(aggregate, symbols) { None => Ok(()), Some(_) => Err("Aggregate symbols were overwritten/shadowed.".to_owned()), }, ) .unwrap_or(Ok(())) } pub fn get_aggregate_by_name(&self, name: &str) -> Option<Aggregate> { self.aggregate_names.get(name).copied() } pub fn get_aggregate_index(&self, aggregate: &Aggregate, field_name: &str) -> Option<u64> { self.aggregate_symbols .get(aggregate) .and_then(|idx_map| idx_map.get(field_name).copied()) } } // ------------------------------------------------------------------------------------------------- fn create_struct_aggregate( context: &mut Context, struct_names: &mut StructSymbolMap, name: String, fields: Vec<OwnedTypedStructField>, ) -> Result<Aggregate, String> { let (field_types, syms): (Vec<_>, Vec<_>) = fields .into_iter() .map(|tsf| { ( convert_resolved_typeid_no_span(context, struct_names, &tsf.r#type), tsf.name, ) }) .unzip(); let field_types = field_types .into_iter() .collect::<Result<Vec<_>, String>>()?; let aggregate = Aggregate::new_struct(context, field_types); struct_names.add_aggregate_symbols( name, aggregate, Some(HashMap::from_iter( syms.into_iter().enumerate().map(|(n, sym)| (sym, n as u64)), )), )?; Ok(aggregate) } // ------------------------------------------------------------------------------------------------- fn compile_enum_decl( context: &mut Context, struct_names: &mut StructSymbolMap, enum_decl: TypedEnumDeclaration, ) -> Result<Aggregate, String> { let TypedEnumDeclaration { name, type_parameters, variants, .. } = enum_decl; if !type_parameters.is_empty() { return Err("Unable to compile generic enums.".into()); } create_enum_aggregate( context, struct_names, name.as_str().to_owned(), variants .into_iter() .map(|tev| tev.as_owned_typed_enum_variant()) .collect(), ) } fn create_enum_aggregate( context: &mut Context, struct_names: &mut StructSymbolMap, name: String, variants: Vec<OwnedTypedEnumVariant>, ) -> Result<Aggregate, String> { // Create the enum aggregate first. NOTE: single variant enums don't need an aggregate but are // getting one here anyway. They don't need to be a tagged union either. let field_types: Vec<_> = variants .into_iter() .map(|tev| convert_resolved_typeid_no_span(context, struct_names, &tev.r#type)) .collect::<Result<Vec<_>, String>>()?; let enum_aggregate = Aggregate::new_struct(context, field_types); struct_names.add_aggregate_symbols(name.clone() + "_union", enum_aggregate, None)?; // Create the tagged union struct next. 
let tagged_union = Aggregate::new_struct(context, vec![Type::Uint(64), Type::Union(enum_aggregate)]); struct_names.add_aggregate_symbols(name, tagged_union, None)?; Ok(tagged_union) } // ------------------------------------------------------------------------------------------------- fn create_tuple_aggregate( context: &mut Context, struct_names: &mut StructSymbolMap, fields: Vec<TypeId>, ) -> Result<Aggregate, String> { let field_types = fields .into_iter() .map(|ty_id| convert_resolved_typeid_no_span(context, struct_names, &ty_id)) .collect::<Result<Vec<_>, String>>()?; Ok(Aggregate::new_struct(context, field_types)) } // ------------------------------------------------------------------------------------------------- fn compile_function( context: &mut Context, module: Module, struct_names: &mut StructSymbolMap, ast_fn_decl: TypedFunctionDeclaration, ) -> Result<Option<Function>, String> { // Currently monomorphisation of generics is inlined into main() and the functions with generic // args are still present in the AST declarations, but they can be ignored. if !ast_fn_decl.type_parameters.is_empty() { Ok(None) } else { let args = ast_fn_decl .parameters .iter() .map(|param| { convert_resolved_typeid(context, struct_names, &param.r#type, &param.type_span) .map(|ty| (param.name.as_str().into(), ty, param.name.span().clone())) }) .collect::<Result<Vec<(String, Type, Span)>, String>>()?; compile_fn_with_args(context, module, struct_names, ast_fn_decl, args, None).map(&Some) } } // ------------------------------------------------------------------------------------------------- fn compile_fn_with_args( context: &mut Context, module: Module, struct_names: &mut StructSymbolMap, ast_fn_decl: TypedFunctionDeclaration, args: Vec<(String, Type, Span)>, selector: Option<[u8; 4]>, ) -> Result<Function, String> { let TypedFunctionDeclaration { name, body, return_type, return_type_span, visibility, .. } = ast_fn_decl; let args = args .into_iter() .map(|(name, ty, span)| (name, ty, MetadataIndex::from_span(context, &span))) .collect(); let ret_type = convert_resolved_typeid(context, struct_names, &return_type, &return_type_span)?; let func = Function::new( context, module, name.as_str().to_owned(), args, ret_type, selector, visibility == Visibility::Public, ); // We clone the struct symbols here, as they contain the globals; any new local declarations // may remain within the function scope. let mut compiler = FnCompiler::new(context, module, func, struct_names.clone()); let ret_val = compiler.compile_code_block(context, body)?; compiler .current_block .ins(context) .ret(ret_val, ret_type, None); Ok(func) } // ------------------------------------------------------------------------------------------------- /* Disabled until we can improve symbol resolution. See comments above in compile_declarations(). 
fn compile_impl( context: &mut Context, module: Module, struct_names: &mut StructSymbolMap, self_type: TypeInfo, ast_methods: Vec<TypedFunctionDeclaration>, ) -> Result<(), String> { for method in ast_methods { let args = method .parameters .iter() .map(|param| { if param.name.as_str() == "self" { convert_resolved_type(context, struct_names, &self_type) } else { convert_resolved_typeid(context, struct_names, &param.r#type, &param.type_span) } .map(|ty| (param.name.as_str().into(), ty, param.name.span().clone())) }) .collect::<Result<Vec<(String, Type, Span)>, String>>()?; compile_fn_with_args(context, module, struct_names, method, args, None)?; } Ok(()) } */ // ------------------------------------------------------------------------------------------------- fn compile_abi_method( context: &mut Context, module: Module, struct_names: &mut StructSymbolMap, ast_fn_decl: TypedFunctionDeclaration, ) -> Result<Function, String> { let selector = ast_fn_decl.to_fn_selector_value().value.ok_or(format!( "Cannot generate selector for ABI method: {}", ast_fn_decl.name.as_str() ))?; let args = ast_fn_decl .parameters .iter() .map(|param| { convert_resolved_typeid(context, struct_names, &param.r#type, &param.type_span) .map(|ty| (param.name.as_str().into(), ty, param.name.span().clone())) }) .collect::<Result<Vec<(String, Type, Span)>, String>>()?; compile_fn_with_args( context, module, struct_names, ast_fn_decl, args, Some(selector), ) } // ------------------------------------------------------------------------------------------------- struct FnCompiler { module: Module, function: Function, current_block: Block, symbol_map: HashMap<String, String>, struct_names: StructSymbolMap, } impl FnCompiler { fn new( context: &mut Context, module: Module, function: Function, struct_names: StructSymbolMap, ) -> Self { let symbol_map = HashMap::from_iter( function .args_iter(context) .map(|(name, _value)| (name.clone(), name.clone())), ); FnCompiler { module, function, current_block: function.get_entry_block(context), symbol_map, struct_names, } } // --------------------------------------------------------------------------------------------- fn compile_code_block( &mut self, context: &mut Context, ast_block: TypedCodeBlock, ) -> Result<Value, String> { ast_block .contents .into_iter() .map(|ast_node| { let span_md_idx = MetadataIndex::from_span(context, &ast_node.span); match ast_node.content { TypedAstNodeContent::ReturnStatement(trs) => { self.compile_return_statement(context, trs.expr) } TypedAstNodeContent::Declaration(td) => match td { TypedDeclaration::VariableDeclaration(tvd) => { self.compile_var_decl(context, tvd, span_md_idx) } TypedDeclaration::ConstantDeclaration(tcd) => { self.compile_const_decl(context, tcd, span_md_idx) } TypedDeclaration::FunctionDeclaration(_) => Err("func decl".into()), TypedDeclaration::TraitDeclaration(_) => Err("trait decl".into()), TypedDeclaration::StructDeclaration(_) => Err("struct decl".into()), TypedDeclaration::EnumDeclaration(ted) => { let span_md_idx = MetadataIndex::from_span(context, &ted.span); compile_enum_decl(context, &mut self.struct_names, ted).map(|_| ())?; Ok(Constant::get_unit(context, span_md_idx)) } TypedDeclaration::Reassignment(tr) => { self.compile_reassignment(context, tr, span_md_idx) } TypedDeclaration::ImplTrait { span, .. } => { // XXX What if I ignore the trait implementation??? Potentially since // we currently inline everything and below we 'recreate' the functions // lazily as they are called, nothing needs to be done here. BUT! 
// This is obviously not really correct, and eventually we want to // compile and then call these properly. let span_md_idx = MetadataIndex::from_span(context, &span); Ok(Constant::get_unit(context, span_md_idx)) } TypedDeclaration::AbiDeclaration(_) => Err("abi decl".into()), TypedDeclaration::GenericTypeForFunctionScope { .. } => { Err("gen ty for fn scope".into()) } TypedDeclaration::ErrorRecovery { .. } => Err("error recovery".into()), }, TypedAstNodeContent::Expression(te) => { // An expression with an ignored return value... I assume. self.compile_expression(context, te) } TypedAstNodeContent::ImplicitReturnExpression(te) => { self.compile_expression(context, te) } TypedAstNodeContent::WhileLoop(twl) => { self.compile_while_loop(context, twl, span_md_idx) } TypedAstNodeContent::SideEffect => Err("code block side effect".into()), } }) .collect::<Result<Vec<_>, String>>() .map(|vals| vals.last().cloned()) .transpose() .unwrap_or_else(|| Ok(Constant::get_unit(context, None))) } // --------------------------------------------------------------------------------------------- fn compile_expression( &mut self, context: &mut Context, ast_expr: TypedExpression, ) -> Result<Value, String> { let span_md_idx = MetadataIndex::from_span(context, &ast_expr.span); match ast_expr.expression { TypedExpressionVariant::Literal(l) => Ok(convert_literal_to_value(context, &l, span_md_idx)), TypedExpressionVariant::FunctionApplication { name, arguments, function_body, .. } => self.compile_fn_call( context, name.suffix.as_str(), arguments, Some(function_body), span_md_idx, ), TypedExpressionVariant::LazyOperator { op, lhs, rhs } => { self.compile_lazy_op(context, op, *lhs, *rhs, span_md_idx) } TypedExpressionVariant::VariableExpression { name } => { self.compile_var_expr(context, name.as_str(), span_md_idx) } TypedExpressionVariant::Array { contents } => { self.compile_array_expr(context, contents, span_md_idx) } TypedExpressionVariant::ArrayIndex { prefix, index } => { self.compile_array_index(context, *prefix, *index, span_md_idx) } TypedExpressionVariant::StructExpression { struct_name, fields, } => self.compile_struct_expr(context, struct_name.as_str(), fields, span_md_idx), TypedExpressionVariant::CodeBlock(cb) => self.compile_code_block(context, cb), TypedExpressionVariant::FunctionParameter => Err("expr func param".into()), TypedExpressionVariant::IfExp { condition, then, r#else, } => self.compile_if(context, *condition, *then, r#else), TypedExpressionVariant::AsmExpression { registers, body, returns, whole_block_span, } => { let span_md_idx = MetadataIndex::from_span(context, &whole_block_span); self.compile_asm_expr(context, registers, body, returns, span_md_idx) } TypedExpressionVariant::StructFieldAccess { prefix, field_to_access, resolved_type_of_parent, field_to_access_span, .. } => { let span_md_idx = MetadataIndex::from_span(context, &field_to_access_span); self.compile_struct_field_expr( context, *prefix, field_to_access, resolved_type_of_parent, span_md_idx, ) } TypedExpressionVariant::EnumInstantiation { enum_decl, tag, contents, .. } => self.compile_enum_expr(context, enum_decl, tag, contents), TypedExpressionVariant::EnumArgAccess { //Prefix: Box<TypedExpression>, //Arg_num_to_access: usize, //Resolved_type_of_parent: TypeId, .. 
} => Err("enum arg access".into()), TypedExpressionVariant::Tuple { fields } => self.compile_tuple_expr(context, fields, span_md_idx), TypedExpressionVariant::TupleElemAccess { prefix, elem_to_access_num: idx, elem_to_access_span: span, resolved_type_of_parent: tuple_type, } => self.compile_tuple_elem_expr( context, *prefix, tuple_type, idx, span), TypedExpressionVariant::AbiCast { span, .. } => { let span_md_idx = MetadataIndex::from_span(context, &span); Ok(Constant::get_unit(context, span_md_idx)) } TypedExpressionVariant::SizeOf { variant } => { match variant { SizeOfVariant::Type(_) => unimplemented!(), SizeOfVariant::Val(exp) => self.compile_expression(context, *exp) } }, } } // --------------------------------------------------------------------------------------------- fn compile_return_statement( &mut self, context: &mut Context, ast_expr: TypedExpression, ) -> Result<Value, String> { let span_md_idx = MetadataIndex::from_span(context, &ast_expr.span); let ret_value = self.compile_expression(context, ast_expr)?; match ret_value.get_type(context) { None => Err("Unable to determine type for return statement expression.".into()), Some(ret_ty) => { self.current_block .ins(context) .ret(ret_value, ret_ty, span_md_idx); // RET is a terminator so we must create a new block here. If anything is added to // it then it'll almost certainly be dead code. self.current_block = self.function.create_block(context, None); Ok(Constant::get_unit(context, span_md_idx)) } } } // --------------------------------------------------------------------------------------------- fn compile_lazy_op( &mut self, context: &mut Context, ast_op: LazyOp, ast_lhs: TypedExpression, ast_rhs: TypedExpression, span_md_idx: Option<MetadataIndex>, ) -> Result<Value, String> { // Short-circuit: if LHS is true for AND we still must eval the RHS block; for OR we can // skip the RHS block, and vice-versa. let lhs_val = self.compile_expression(context, ast_lhs)?; let rhs_block = self.function.create_block(context, None); let final_block = self.function.create_block(context, None); let cond_builder = self.current_block.ins(context); match ast_op { LazyOp::And => cond_builder.conditional_branch( lhs_val, rhs_block, final_block, Some(lhs_val), span_md_idx, ), LazyOp::Or => cond_builder.conditional_branch( lhs_val, final_block, rhs_block, Some(lhs_val), span_md_idx, ), }; self.current_block = rhs_block; let rhs_val = self.compile_expression(context, ast_rhs)?; self.current_block .ins(context) .branch(final_block, Some(rhs_val), span_md_idx); self.current_block = final_block; Ok(final_block.get_phi(context)) } // --------------------------------------------------------------------------------------------- fn compile_fn_call( &mut self, context: &mut Context, _ast_name: &str, ast_args: Vec<(Ident, TypedExpression)>, callee_body: Option<TypedCodeBlock>, span_md_idx: Option<MetadataIndex>, ) -> Result<Value, String> { // XXX OK, now, the old compiler inlines everything very lazily. Function calls include // the body of the callee (i.e., the callee_body arg above) and so codegen just pulled it // straight in, no questions asked. Library functions are provided in an initial namespace // from Forc and when the parser builds the AST (or is it during type checking?) these // function bodies are embedded. // // We're going to build little single-use instantiations of the callee and then call them. 
// For now if they're called in multiple places they'll be redundantly recreated, but also // at present we are still inlining everything so it actually makes little difference. // // Eventually we need to Do It Properly and inline only when necessary, and compile the // standard library to an actual module. { // Firstly create the single-use callee by fudging an AST declaration. let callee_name = context.get_unique_name(); let callee_name_len = callee_name.len(); let callee_ident = Ident::new(crate::span::Span { span: pest::Span::new(std::sync::Arc::from(callee_name), 0, callee_name_len) .unwrap(), path: None, }); let parameters = ast_args .iter() .map(|(name, expr)| TypedFunctionParameter { name: name.clone(), r#type: expr.return_type, type_span: crate::span::Span { span: pest::Span::new(" ".into(), 0, 0).unwrap(), path: None, }, }) .collect(); let callee_body = callee_body.unwrap(); // We're going to have to reverse engineer the return type. let return_type = Self::get_codeblock_return_type(&callee_body).unwrap_or_else(|| // This code block is missing a return or implicit return. The only time I've // seen it happen (whether it's 'valid' or not) is in std::storage::store(), // which has a single asm block which also returns nothing. In this case, it // actually is Unit. insert_type(TypeInfo::Tuple(Vec::new()))); let callee_fn_decl = TypedFunctionDeclaration { name: callee_ident, body: callee_body, parameters, span: crate::span::Span { span: pest::Span::new(" ".into(), 0, 0).unwrap(), path: None, }, return_type, type_parameters: Vec::new(), return_type_span: crate::span::Span { span: pest::Span::new(" ".into(), 0, 0).unwrap(), path: None, }, visibility: Visibility::Private, is_contract_call: false, purity: Default::default(), }; let callee = compile_function(context, self.module, &mut self.struct_names, callee_fn_decl)?; // Now actually call the new function. let args = ast_args .into_iter() .map(|(_, expr)| self.compile_expression(context, expr)) .collect::<Result<Vec<Value>, String>>()?; Ok(self .current_block .ins(context) .call(callee.unwrap(), &args, span_md_idx)) } } fn get_codeblock_return_type(codeblock: &TypedCodeBlock) -> Option<TypeId> { if codeblock.contents.is_empty() { Some(insert_type(TypeInfo::Tuple(Vec::new()))) } else { codeblock .contents .iter() .find_map(|node| match &node.content { TypedAstNodeContent::ReturnStatement(trs) => Some(trs.expr.return_type), TypedAstNodeContent::ImplicitReturnExpression(te) => Some(te.return_type), _otherwise => None, }) } } // --------------------------------------------------------------------------------------------- fn compile_if( &mut self, context: &mut Context, ast_condition: TypedExpression, ast_then: TypedExpression, ast_else: Option<Box<TypedExpression>>, ) -> Result<Value, String> { // Compile the condition expression in the entry block. Then save the current block so we // can jump to the true and false blocks after we've created them. let cond_span_md_idx = MetadataIndex::from_span(context, &ast_condition.span); let cond_value = self.compile_expression(context, ast_condition)?; let entry_block = self.current_block; // To keep the blocks in a nice order we create them only as we populate them. It's // possible when compiling other expressions for the 'current' block to change, and it // should always be the block to which instructions are added. So for the true and false // blocks we create them in turn, compile their contents and save the current block // afterwards. 
// // Then once they're both created we can add the conditional branch to them from the entry // block. // // Then we create the merge block and jump from the saved blocks to it, again to keep them // in a nice top-to-bottom order. Perhaps there's a better way to order them, using // post-processing CFG analysis, but... meh. let true_block_begin = self.function.create_block(context, None); self.current_block = true_block_begin; let true_value = self.compile_expression(context, ast_then)?; let true_block_end = self.current_block; let false_block_begin = self.function.create_block(context, None); self.current_block = false_block_begin; let false_value = match ast_else { None => Constant::get_unit(context, None), Some(expr) => self.compile_expression(context, *expr)?, }; let false_block_end = self.current_block; entry_block.ins(context).conditional_branch( cond_value, true_block_begin, false_block_begin, None, cond_span_md_idx, ); let merge_block = self.function.create_block(context, None); true_block_end .ins(context) .branch(merge_block, Some(true_value), None); false_block_end .ins(context) .branch(merge_block, Some(false_value), None); self.current_block = merge_block; Ok(merge_block.get_phi(context)) } // --------------------------------------------------------------------------------------------- fn compile_while_loop( &mut self, context: &mut Context, ast_while_loop: TypedWhileLoop, span_md_idx: Option<MetadataIndex>, ) -> Result<Value, String> { // We're dancing around a bit here to make the blocks sit in the right order. Ideally we // have the cond block, followed by the body block which may contain other blocks, and the // final block comes after any body block(s). // Jump to the while cond block. let cond_block = self.function.create_block(context, Some("while".into())); self.current_block .ins(context) .branch(cond_block, None, None); // Fill in the body block now, jump unconditionally to the cond block at its end. let body_block = self .function .create_block(context, Some("while_body".into())); self.current_block = body_block; self.compile_code_block(context, ast_while_loop.body)?; self.current_block .ins(context) .branch(cond_block, None, None); // Create the final block after we're finished with the body. let final_block = self .function .create_block(context, Some("end_while".into())); // Add the conditional which jumps into the body or out to the final block. self.current_block = cond_block; let cond_value = self.compile_expression(context, ast_while_loop.condition)?; self.current_block.ins(context).conditional_branch( cond_value, body_block, final_block, None, None, ); self.current_block = final_block; Ok(Constant::get_unit(context, span_md_idx)) } // --------------------------------------------------------------------------------------------- fn compile_var_expr( &mut self, context: &mut Context, name: &str, span_md_idx: Option<MetadataIndex>, ) -> Result<Value, String> { // We need to check the symbol map first, in case locals are shadowing the args, other // locals or even constants. 
if let Some(ptr) = self .symbol_map .get(name) .and_then(|local_name| self.function.get_local_ptr(context, local_name)) { let ptr_val = self.current_block.ins(context).get_ptr(ptr, span_md_idx); Ok(if ptr.is_aggregate_ptr(context) { ptr_val } else { self.current_block.ins(context).load(ptr_val, span_md_idx) }) } else if let Some(val) = self.function.get_arg(context, name) { Ok(val) } else if let Some(const_val) = self.module.get_global_constant(context, name) { Ok(const_val) } else { Err(format!("Unable to resolve variable '{}'.", name)) } } // --------------------------------------------------------------------------------------------- fn compile_var_decl( &mut self, context: &mut Context, ast_var_decl: TypedVariableDeclaration, span_md_idx: Option<MetadataIndex>, ) -> Result<Value, String> { let TypedVariableDeclaration { name, body, is_mutable, .. } = ast_var_decl; // We must compile the RHS before checking for shadowing, as it will still be in the // previous scope. let return_type = convert_resolved_typeid( context, &mut self.struct_names, &body.return_type, &body.span, )?; let init_val = self.compile_expression(context, body)?; let local_name = match self.symbol_map.get(name.as_str()) { None => { // Haven't seen this name before. Use it as-is. name.as_str().to_owned() } Some(shadowed_name) => { // Seen before, and this is shadowing the old one. Update to a new name. format!("{}_", shadowed_name) } }; self.symbol_map .insert(name.as_str().to_owned(), local_name.clone()); let ptr = self .function .new_local_ptr(context, local_name, return_type, is_mutable.into(), None) .map_err(|ir_error| ir_error.to_string())?; let ptr_val = self.current_block.ins(context).get_ptr(ptr, span_md_idx); self.current_block .ins(context) .store(ptr_val, init_val, span_md_idx); Ok(init_val) } // --------------------------------------------------------------------------------------------- fn compile_const_decl( &mut self, context: &mut Context, ast_const_decl: TypedConstantDeclaration, span_md_idx: Option<MetadataIndex>, ) -> Result<Value, String> { // This is local to the function, so we add it to the locals, rather than the module // globals like other const decls. let TypedConstantDeclaration { name, value, .. } = ast_const_decl; if let TypedExpressionVariant::Literal(literal) = &value.expression { let initialiser = convert_literal_to_constant(literal); let return_type = convert_resolved_typeid( context, &mut self.struct_names, &value.return_type, &value.span, )?; let name = name.as_str().to_owned(); self.function .new_local_ptr(context, name.clone(), return_type, false, Some(initialiser)) .map_err(|ir_error| ir_error.to_string())?; // We still insert this into the symbol table, as itself... can they be shadowed? // (Hrmm, name resolution in the variable expression code could be smarter about var // decls vs const decls, for now they're essentially the same...) 
self.symbol_map.insert(name.clone(), name); Ok(Constant::get_unit(context, span_md_idx)) } else { Err("Unsupported constant declaration type.".into()) } } // --------------------------------------------------------------------------------------------- fn compile_reassignment( &mut self, context: &mut Context, ast_reassignment: TypedReassignment, span_md_idx: Option<MetadataIndex>, ) -> Result<Value, String> { let name = ast_reassignment.lhs[0].name.as_str(); let ptr = self .function .get_local_ptr(context, name) .ok_or(format!("variable not found: {}", name))?; let reassign_val = self.compile_expression(context, ast_reassignment.rhs)?; if ast_reassignment.lhs.len() == 1 { // A non-aggregate; use a `store`. let ptr_val = self.current_block.ins(context).get_ptr(ptr, span_md_idx); self.current_block .ins(context) .store(ptr_val, reassign_val, span_md_idx); } else { // An aggregate. Iterate over the field names from the left hand side and collect // field indices. let field_idcs = ast_reassignment.lhs[1..] .iter() .fold( Ok((Vec::new(), *ptr.get_type(context))), |acc, field_name| { // Make sure we have an aggregate to index into. acc.and_then(|(mut fld_idcs, ty)| match ty { Type::Struct(aggregate) => { // Get the field index and also its type for the next iteration. match self .struct_names .get_aggregate_index(&aggregate, field_name.name.as_str()) { None => Err(format!( "Unknown field name {} for struct ???", field_name.name.as_str() )), Some(field_idx) => { let field_type = context.aggregates[aggregate.0] .field_types() [field_idx as usize]; // Save the field index. fld_idcs.push(field_idx); Ok((fld_idcs, field_type)) } } } _otherwise => { Err("Reassignment with multiple accessors to non-aggregate.".into()) } }) }, )? .0; let ty = match ptr.get_type(context) { Type::Struct(aggregate) => *aggregate, _otherwise => { return Err("Reassignment with multiple accessors to non-aggregate.".into()) } }; let ptr_val = self.current_block.ins(context).get_ptr(ptr, span_md_idx); self.current_block.ins(context).insert_value( ptr_val, ty, reassign_val, field_idcs, span_md_idx, ); } // This shouldn't really return a value, it doesn't make sense to return the `store` or // `insert_value` instruction, but we need to return something at this stage. Ok(reassign_val) } // --------------------------------------------------------------------------------------------- fn compile_array_expr( &mut self, context: &mut Context, contents: Vec<TypedExpression>, span_md_idx: Option<MetadataIndex>, ) -> Result<Value, String> { if contents.is_empty() { return Err("Unable to create zero sized static arrays.".into()); } // Create a new aggregate, since they're not named. let elem_type = convert_resolved_typeid_no_span( context, &mut self.struct_names, &contents[0].return_type, )?; let aggregate = Aggregate::new_array(context, elem_type, contents.len() as u64); // Compile each element and insert it immediately. let array_value = Constant::get_undef(context, Type::Array(aggregate), span_md_idx); contents .into_iter() .enumerate() .fold(Ok(array_value), |array_value, (idx, elem_expr)| { // Result::flatten() is currently nightly only. 
match array_value { Err(_) => array_value, Ok(array_value) => { let index_val = Constant::get_uint(context, 64, idx as u64, span_md_idx); self.compile_expression(context, elem_expr) .map(|elem_value| { self.current_block.ins(context).insert_element( array_value, aggregate, elem_value, index_val, span_md_idx, ) }) } } }) } // --------------------------------------------------------------------------------------------- fn compile_array_index( &mut self, context: &mut Context, array_expr: TypedExpression, index_expr: TypedExpression, span_md_idx: Option<MetadataIndex>, ) -> Result<Value, String> { let array_val = self.compile_expression(context, array_expr)?; let aggregate = match &context.values[array_val.0].value { ValueDatum::Instruction(instruction) => { instruction.get_aggregate(context).ok_or_else(|| { format!( "Unsupported instruction as array value for index expression. {:?}", instruction ) }) } ValueDatum::Argument(Type::Array(aggregate)) => Ok(*aggregate), otherwise => Err(format!( "Unsupported array value for index expression: {:?}", otherwise )), }?; // Check for out of bounds if we have a literal index. let (_, count) = context.aggregates[aggregate.0].array_type(); if let TypedExpressionVariant::Literal(Literal::U64(index)) = index_expr.expression { if index >= *count { // XXX Here is a very specific case where we want to return an Error enum // specifically, if not an actual CompileError. This should be a // CompileError::ArrayOutOfBounds, or at least converted to one. return Err(format!( "Array index out of bounds; the length is {} but the index is {}.", *count, index )); } } let index_val = self.compile_expression(context, index_expr)?; Ok(self.current_block.ins(context).extract_element( array_val, aggregate, index_val, span_md_idx, )) } // --------------------------------------------------------------------------------------------- fn compile_struct_expr( &mut self, context: &mut Context, struct_name: &str, fields: Vec<TypedStructExpressionField>, span_md_idx: Option<MetadataIndex>, ) -> Result<Value, String> { let aggregate = self .struct_names .get_aggregate_by_name(struct_name) .ok_or_else(|| format!("Unknown aggregate {}", struct_name))?; // Compile each of the values for field initialisers and calculate their indices. let inserted_values_indices = fields .into_iter() .map(|field_value| { let name = field_value.name.as_str(); self.compile_expression(context, field_value.value) .and_then(|insert_val| { self.struct_names .get_aggregate_index(&aggregate, name) .ok_or_else(|| { format!("Unknown field name {} for aggregate {}", name, struct_name) }) .map(|insert_idx| (insert_val, insert_idx)) }) }) .collect::<Result<Vec<_>, String>>()?; // Start with a constant empty struct and then fill in the values. 
let agg_value = Constant::get_undef(context, Type::Struct(aggregate), span_md_idx); Ok(inserted_values_indices.into_iter().fold( agg_value, |agg_value, (insert_val, insert_idx)| { self.current_block.ins(context).insert_value( agg_value, aggregate, insert_val, vec![insert_idx], span_md_idx, ) }, )) } // --------------------------------------------------------------------------------------------- fn compile_struct_field_expr( &mut self, context: &mut Context, ast_struct_expr: TypedExpression, ast_field: OwnedTypedStructField, _ast_parent_type: TypeId, span_md_idx: Option<MetadataIndex>, ) -> Result<Value, String> { let struct_val = self.compile_expression(context, ast_struct_expr)?; let aggregate = match &context.values[struct_val.0].value { ValueDatum::Instruction(instruction) => { instruction.get_aggregate(context).ok_or_else(|| { format!( "Unsupported instruction as struct value for field expression. {:?}", instruction ) }) } ValueDatum::Argument(Type::Struct(aggregate)) => Ok(*aggregate), otherwise => Err(format!( "Unsupported struct value for field expression: {:?}", otherwise )), }?; let field_idx = self .struct_names .get_aggregate_index(&aggregate, &ast_field.name) .ok_or_else(|| format!("Unknown field name {} in struct ???", ast_field.name))?; Ok(self.current_block.ins(context).extract_value( struct_val, aggregate, vec![field_idx], span_md_idx, )) } // --------------------------------------------------------------------------------------------- // As per compile_enum_decl(), these are tagged unions. fn compile_enum_expr( &mut self, context: &mut Context, enum_decl: TypedEnumDeclaration, tag: usize, contents: Option<Box<TypedExpression>>, ) -> Result<Value, String> { // XXX The enum instantiation AST node includes the full declaration. If the enum was // declared in a different module then it seems for now there's no easy way to pre-analyse // it and add its type/aggregate to the context. We can re-use them here if we recognise // the name, and if not add a new aggregate... OTOH the naming seems a little fragile and // we could potentially use the wrong aggregate with the same name, different module... // dunno. let span_md_idx = MetadataIndex::from_span(context, &enum_decl.span); let aggregate = match self .struct_names .get_aggregate_by_name(enum_decl.name.as_str()) { Some(agg) => Ok(agg), None => compile_enum_decl(context, &mut self.struct_names, enum_decl), }?; let tag_value = Constant::get_uint(context, 64, tag as u64, span_md_idx); // Start with the undef and insert the tag. let agg_value = Constant::get_undef(context, Type::Struct(aggregate), span_md_idx); let agg_value = self.current_block.ins(context).insert_value( agg_value, aggregate, tag_value, vec![0], span_md_idx, ); Ok(match contents { None => agg_value, Some(te) => { // Insert the value too. let contents_value = self.compile_expression(context, *te)?; self.current_block.ins(context).insert_value( agg_value, aggregate, contents_value, vec![1], span_md_idx, ) } }) } // --------------------------------------------------------------------------------------------- fn compile_tuple_expr( &mut self, context: &mut Context, fields: Vec<TypedExpression>, span_md_idx: Option<MetadataIndex>, ) -> Result<Value, String> { if fields.is_empty() { // This is a Unit. We're still debating whether Unit should just be an empty tuple in // the IR or not... it is a special case for now. 
Ok(Constant::get_unit(context, span_md_idx)) } else { let (init_values, init_types): (Vec<Value>, Vec<Type>) = fields .into_iter() .map(|field_expr| { convert_resolved_typeid_no_span( context, &mut self.struct_names, &field_expr.return_type, ) .and_then(|init_type| { self.compile_expression(context, field_expr) .map(|init_value| (init_value, init_type)) }) }) .collect::<Result<Vec<_>, String>>()? .into_iter() .unzip(); let aggregate = Aggregate::new_struct(context, init_types); let agg_value = Constant::get_undef(context, Type::Struct(aggregate), span_md_idx); Ok(init_values.into_iter().enumerate().fold( agg_value, |agg_value, (insert_idx, insert_val)| { self.current_block.ins(context).insert_value( agg_value, aggregate, insert_val, vec![insert_idx as u64], span_md_idx, ) }, )) } } // --------------------------------------------------------------------------------------------- fn compile_tuple_elem_expr( &mut self, context: &mut Context, tuple: TypedExpression, tuple_type: TypeId, idx: usize, span: Span, ) -> Result<Value, String> { let tuple_value = self.compile_expression(context, tuple)?; if let Type::Struct(aggregate) = convert_resolved_typeid(context, &mut self.struct_names, &tuple_type, &span)? { let span_md_idx = MetadataIndex::from_span(context, &span); Ok(self.current_block.ins(context).extract_value( tuple_value, aggregate, vec![idx as u64], span_md_idx, )) } else { Err("Invalid (non-aggregate?) tuple type for TupleElemAccess?".into()) } } // --------------------------------------------------------------------------------------------- fn compile_asm_expr( &mut self, context: &mut Context, registers: Vec<TypedAsmRegisterDeclaration>, body: Vec<AsmOp>, returns: Option<(AsmRegister, Span)>, whole_block_span_md_idx: Option<MetadataIndex>, ) -> Result<Value, String> { let registers = registers .into_iter() .map( |TypedAsmRegisterDeclaration { initializer, name, .. }| { // Take the optional initialiser, map it to an Option<Result<Value>>, // transpose that to Result<Option<Value>> and map that to an AsmArg. 
initializer .map(|init_expr| self.compile_expression(context, init_expr)) .transpose() .map(|init| AsmArg { name, initializer: init, }) }, ) .collect::<Result<Vec<AsmArg>, String>>()?; let body = body .into_iter() .map( |AsmOp { op_name, op_args, immediate, span, }| AsmInstruction { name: op_name, args: op_args, immediate, span_md_idx: MetadataIndex::from_span(context, &span), }, ) .collect(); let returns = returns.as_ref().map(|(asm_reg, _)| { Ident::new(Span { span: pest::Span::new(asm_reg.name.as_str().into(), 0, asm_reg.name.len()).unwrap(), path: None, }) }); Ok(self.current_block.ins(context).asm_block( registers, body, returns, whole_block_span_md_idx, )) } } // ------------------------------------------------------------------------------------------------- fn convert_literal_to_value( context: &mut Context, ast_literal: &Literal, span_id_idx: Option<MetadataIndex>, ) -> Value { match ast_literal { Literal::U8(n) | Literal::Byte(n) => Constant::get_uint(context, 8, *n as u64, span_id_idx), Literal::U16(n) => Constant::get_uint(context, 16, *n as u64, span_id_idx), Literal::U32(n) => Constant::get_uint(context, 32, *n as u64, span_id_idx), Literal::U64(n) => Constant::get_uint(context, 64, *n, span_id_idx), Literal::Numeric(n) => Constant::get_uint(context, 64, *n, span_id_idx), Literal::String(s) => Constant::get_string(context, s.as_str().to_owned(), span_id_idx), Literal::Boolean(b) => Constant::get_bool(context, *b, span_id_idx), Literal::B256(bs) => Constant::get_b256(context, *bs, span_id_idx), } } fn convert_literal_to_constant(ast_literal: &Literal) -> Constant { match ast_literal { Literal::U8(n) | Literal::Byte(n) => Constant::new_uint(8, *n as u64), Literal::U16(n) => Constant::new_uint(16, *n as u64), Literal::U32(n) => Constant::new_uint(32, *n as u64), Literal::U64(n) => Constant::new_uint(64, *n), Literal::Numeric(n) => Constant::new_uint(64, *n), Literal::String(s) => Constant::new_string(s.as_str().to_owned()), Literal::Boolean(b) => Constant::new_bool(*b), Literal::B256(bs) => Constant::new_b256(*bs), } } fn convert_resolved_typeid( context: &mut Context, struct_names: &mut StructSymbolMap, ast_type: &TypeId, span: &Span, ) -> Result<Type, String> { // There's probably a better way to convert TypeError to String, but... we'll use something // other than String eventually? IrError? convert_resolved_type( context, struct_names, &resolve_type(*ast_type, span).map_err(|ty_err| format!("{:?}", ty_err))?, ) } fn convert_resolved_typeid_no_span( context: &mut Context, struct_names: &mut StructSymbolMap, ast_type: &TypeId, ) -> Result<Type, String> { let msg = "unknown source location"; let span = crate::span::Span { span: pest::Span::new(std::sync::Arc::from(msg), 0, msg.len()).unwrap(), path: None, }; convert_resolved_typeid(context, struct_names, ast_type, &span) } fn convert_resolved_type( context: &mut Context, struct_names: &mut StructSymbolMap, ast_type: &TypeInfo, ) -> Result<Type, String> { Ok(match ast_type { TypeInfo::UnsignedInteger(nbits) => { // We need impl IntegerBits { fn num_bits() -> u64 { ... } } let nbits = match nbits { IntegerBits::Eight => 8, IntegerBits::Sixteen => 16, IntegerBits::ThirtyTwo => 32, IntegerBits::SixtyFour => 64, }; Type::Uint(nbits) } TypeInfo::Numeric => Type::Uint(64), TypeInfo::Boolean => Type::Bool, TypeInfo::Byte => Type::Uint(8), // XXX? 
TypeInfo::B256 => Type::B256, TypeInfo::Str(n) => Type::String(*n), TypeInfo::Struct { name, fields } => match struct_names.get_aggregate_by_name(name) { Some(existing_aggregate) => Type::Struct(existing_aggregate), None => { // Let's create a new aggregate from the TypeInfo. create_struct_aggregate(context, struct_names, name.clone(), fields.clone()) .map(&Type::Struct)? } }, TypeInfo::Enum { name, variant_types, } => { match struct_names.get_aggregate_by_name(name) { Some(existing_aggregate) => Type::Struct(existing_aggregate), None => { // Let's create a new aggregate from the TypeInfo. create_enum_aggregate( context, struct_names, name.clone(), variant_types.clone(), ) .map(&Type::Struct)? } } } TypeInfo::Array(elem_type_id, count) => { let elem_type = convert_resolved_typeid_no_span(context, struct_names, elem_type_id)?; Type::Array(Aggregate::new_array(context, elem_type, *count as u64)) } TypeInfo::Tuple(fields) => { if fields.is_empty() { // XXX We've removed Unit from the core compiler, replaced with an empty Tuple. // Perhaps the same should be done for the IR, although it would use an empty // aggregate which might not make as much sense as a dedicated Unit type. Type::Unit } else { create_tuple_aggregate(context, struct_names, fields.clone()).map(Type::Struct)? } } TypeInfo::Custom { .. } => return Err("can't do custom types yet".into()), TypeInfo::SelfType { .. } => return Err("can't do self types yet".into()), TypeInfo::Contract => Type::Contract, TypeInfo::ContractCaller { abi_name, address } => Type::ContractCaller(AbiInstance::new( context, abi_name.prefixes.clone(), abi_name.suffix.clone(), address.clone(), )), TypeInfo::Unknown => return Err("unknown type found in AST..?".into()), TypeInfo::UnknownGeneric { .. } => return Err("unknowngeneric type found in AST..?".into()), TypeInfo::Ref(_) => return Err("ref type found in AST..?".into()), TypeInfo::ErrorRecovery => return Err("error recovery type found in AST..?".into()), }) } // ------------------------------------------------------------------------------------------------- #[cfg(test)] mod tests { use std::path::PathBuf; use crate::{ control_flow_analysis::{ControlFlowGraph, Graph}, parser::{Rule, SwayParser}, semantic_analysis::{TreeType, TypedParseTree}, }; use pest::Parser; // ------------------------------------------------------------------------------------------------- #[test] fn sway_to_ir_tests() { let manifest_dir = env!("CARGO_MANIFEST_DIR"); let dir: PathBuf = format!("{}/tests/sway_to_ir", manifest_dir).into(); for entry in std::fs::read_dir(dir).unwrap() { // We're only interested in the `.sw` files here. let path = entry.unwrap().path(); match path.extension().unwrap().to_str() { Some("sw") => { // // Run the tests! // println!("---- Sway To IR: {:?} ----", path); test_sway_to_ir(path); } Some("ir") | Some("disabled") => (), _ => panic!( "File with invalid extension in tests dir: {:?}", path.file_name().unwrap_or(path.as_os_str()) ), } } } fn test_sway_to_ir(sw_path: PathBuf) { let input_bytes = std::fs::read(&sw_path).unwrap(); let input = String::from_utf8_lossy(&input_bytes); let mut ir_path = sw_path.clone(); ir_path.set_extension("ir"); let expected_bytes = std::fs::read(&ir_path).unwrap(); let expected = String::from_utf8_lossy(&expected_bytes); let typed_ast = parse_to_typed_ast(sw_path, &input); let ir = super::compile_ast(typed_ast).unwrap(); let output = sway_ir::printer::to_string(&ir); // Use a tricky regex to replace the local path in the metadata with something generic. 
It // should convert, e.g., // `!0 = filepath "/usr/home/me/sway/sway-core/tests/sway_to_ir/foo.sw"` // to `!0 = filepath "/path/to/foo.sw"` let path_converter = regex::Regex::new(r#"(!\d = filepath ")(?:[^/]*/)*(.+)"#).unwrap(); let output = path_converter.replace_all(output.as_str(), "$1/path/to/$2"); if output != expected { println!("{}", prettydiff::diff_lines(&expected, &output)); } assert_eq!(output, expected); } // ------------------------------------------------------------------------------------------------- #[test] fn ir_printer_parser_tests() { let manifest_dir = env!("CARGO_MANIFEST_DIR"); let dir: PathBuf = format!("{}/tests/sway_to_ir", manifest_dir).into(); for entry in std::fs::read_dir(dir).unwrap() { // We're only interested in the `.ir` files here. let path = entry.unwrap().path(); match path.extension().unwrap().to_str() { Some("ir") => { // // Run the tests! // println!("---- IR Print and Parse Test: {:?} ----", path); test_printer_parser(path); } Some("sw") | Some("disabled") => (), _ => panic!( "File with invalid extension in tests dir: {:?}", path.file_name().unwrap_or(path.as_os_str()) ), } } } fn test_printer_parser(path: PathBuf) { let input_bytes = std::fs::read(&path).unwrap(); let input = String::from_utf8_lossy(&input_bytes); // Use another tricky regex to inject the proper metadata filepath back, so we can create // spans in the parser. NOTE, if/when we refactor spans to not have the source string and // just the path these tests should pass without needing this conversion. let mut true_path = path.clone(); true_path.set_extension("sw"); let path_converter = regex::Regex::new(r#"(!\d = filepath )(?:.+)"#).unwrap(); let input = path_converter.replace_all(&input, format!("$1\"{}\"", true_path.display())); let parsed_ctx = match sway_ir::parser::parse(&input) { Ok(p) => p, Err(e) => { println!("{}: {}", path.display(), e); panic!(); } }; let printed = sway_ir::printer::to_string(&parsed_ctx); if printed != input { println!("{}", prettydiff::diff_lines(&input, &printed)); } assert_eq!(input, printed); } // ------------------------------------------------------------------------------------------------- fn parse_to_typed_ast(path: PathBuf, input: &str) -> TypedParseTree { let mut parsed = SwayParser::parse(Rule::program, std::sync::Arc::from(input)).expect("parse_tree"); let dir_of_code = std::sync::Arc::new(path.parent().unwrap().into()); let file_name = std::sync::Arc::new(path); let build_config = crate::build_config::BuildConfig { file_name, dir_of_code, manifest_path: std::sync::Arc::new(".".into()), use_ir: false, print_intermediate_asm: false, print_finalized_asm: false, print_ir: false, generated_names: std::sync::Arc::new(std::sync::Mutex::new(vec![])), }; let mut warnings = vec![]; let mut errors = vec![]; let parse_tree = crate::parse_root_from_pairs(parsed.next().unwrap().into_inner(), Some(&build_config)) .unwrap(&mut warnings, &mut errors); let mut dead_code_graph = ControlFlowGraph { graph: Graph::new(), entry_points: vec![], namespace: Default::default(), }; TypedParseTree::type_check( parse_tree.tree, crate::create_module(), crate::create_module(), &TreeType::Script, &build_config, &mut dead_code_graph, ) .unwrap(&mut warnings, &mut errors) } } // -------------------------------------------------------------------------------------------------
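The path-normalising regex in `test_sway_to_ir` is easy to check in isolation. A minimal sketch, assuming only the `regex` crate, with the sample line taken from the comment above:

```rust
fn main() {
    // Same pattern and replacement as in test_sway_to_ir above.
    let path_converter = regex::Regex::new(r#"(!\d = filepath ")(?:[^/]*/)*(.+)"#).unwrap();
    let line = r#"!0 = filepath "/usr/home/me/sway/sway-core/tests/sway_to_ir/foo.sw""#;
    let normalised = path_converter.replace_all(line, "$1/path/to/$2");
    assert_eq!(normalised, r#"!0 = filepath "/path/to/foo.sw""#);
}
```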
39.057734
105
0.514043
614809698d887e1979aaf51fb01b494e9820c053
205
use std::path::PathBuf; use serde::Deserialize; #[derive(Deserialize)] #[serde(deny_unknown_fields)] #[serde(rename_all = "kebab-case")] pub struct AttachmentStoreConfig { pub store_path: PathBuf, }
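A quick round-trip sketch of this config; the JSON input and the choice of `serde_json` are illustrative assumptions (any serde format works), and the kebab-case renaming means the key is spelled `store-path`:

```rust
use std::path::PathBuf;
use serde::Deserialize;

#[derive(Deserialize)]
#[serde(deny_unknown_fields)]
#[serde(rename_all = "kebab-case")]
pub struct AttachmentStoreConfig {
    pub store_path: PathBuf,
}

fn main() {
    // store_path deserializes from the kebab-case key "store-path".
    let cfg: AttachmentStoreConfig =
        serde_json::from_str(r#"{ "store-path": "/var/lib/attachments" }"#).unwrap();
    assert_eq!(cfg.store_path, PathBuf::from("/var/lib/attachments"));

    // deny_unknown_fields rejects any extra key.
    let bad = serde_json::from_str::<AttachmentStoreConfig>(r#"{ "store-path": "/tmp", "extra": 1 }"#);
    assert!(bad.is_err());
}
```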
18.636364
35
0.736585
bf1edc70fb81a9240b43be1811cfe7ca2dcd3a6e
28,105
#![cfg_attr(all(feature = "no_std", not(test)), no_std)] // Note: Concurrency tests require std for threading/channels #![allow(clippy::type_complexity)] pub mod iter; pub mod iter_set; pub mod lock; pub mod mapref; mod read_only; #[cfg(feature = "serde")] mod serde; mod set; pub mod setref; mod t; mod util; use ahash::RandomState; use cfg_if::cfg_if; use core::borrow::Borrow; use core::fmt; use core::hash::{BuildHasher, Hash, Hasher}; use core::iter::FromIterator; use core::ops::{BitAnd, BitOr, Shl, Shr, Sub}; use iter::{Iter, IterMut, OwningIter}; use lock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; use mapref::entry::{Entry, OccupiedEntry, VacantEntry}; use mapref::multiple::RefMulti; use mapref::one::{Ref, RefMut}; pub use read_only::ReadOnlyView; pub use set::DashSet; pub use t::Map; cfg_if! { if #[cfg(feature = "raw-api")] { pub use util::SharedValue; } else { use util::SharedValue; } } cfg_if! { if #[cfg(feature = "no_std")] { extern crate alloc; use alloc::{vec::Vec, boxed::Box}; pub(crate) type HashMap<K, V, S> = hashbrown::HashMap<K, SharedValue<V>, S>; } else { pub(crate) type HashMap<K, V, S> = std::collections::HashMap<K, SharedValue<V>, S>; } } fn shard_amount() -> usize { (num_cpus::get() * 4).next_power_of_two() } fn ncb(shard_amount: usize) -> usize { shard_amount.trailing_zeros() as usize } /// DashMap is an implementation of a concurrent associative array/hashmap in Rust. /// /// DashMap tries to implement an easy to use API similar to `std::collections::HashMap` /// with some slight changes to handle concurrency. /// /// DashMap tries to be very simple to use and to be a direct replacement for `RwLock<HashMap<K, V, S>>`. /// To accomplish these all methods take `&self` instead modifying methods taking `&mut self`. /// This allows you to put a DashMap in an `Arc<T>` and share it between threads while being able to modify it. pub struct DashMap<K, V, S = RandomState> { shift: usize, shards: Box<[RwLock<HashMap<K, V, S>>]>, hasher: S, } impl<K: Eq + Hash + Clone, V: Clone, S: Clone> Clone for DashMap<K, V, S> { fn clone(&self) -> Self { let mut inner_shards = Vec::new(); for shard in self.shards.iter() { let shard = shard.read(); inner_shards.push(RwLock::new((*shard).clone())); } Self { shift: self.shift, shards: inner_shards.into_boxed_slice(), hasher: self.hasher.clone(), } } } impl<K, V, S> Default for DashMap<K, V, S> where K: Eq + Hash, S: Default + BuildHasher + Clone, { fn default() -> Self { Self::with_hasher(Default::default()) } } impl<'a, K: 'a + Eq + Hash, V: 'a> DashMap<K, V, RandomState> { /// Creates a new DashMap with a capacity of 0. /// /// # Examples /// /// ``` /// use dashmap::DashMap; /// /// let reviews = DashMap::new(); /// reviews.insert("Veloren", "What a fantastic game!"); /// ``` pub fn new() -> Self { DashMap::with_hasher(RandomState::default()) } /// Creates a new DashMap with a specified starting capacity. /// /// # Examples /// /// ``` /// use dashmap::DashMap; /// /// let mappings = DashMap::with_capacity(2); /// mappings.insert(2, 4); /// mappings.insert(8, 16); /// ``` pub fn with_capacity(capacity: usize) -> Self { DashMap::with_capacity_and_hasher(capacity, RandomState::default()) } } impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone> DashMap<K, V, S> { /// Wraps this `DashMap` into a read-only view. This view allows to obtain raw references to the stored values. pub fn into_read_only(self) -> ReadOnlyView<K, V, S> { ReadOnlyView::new(self) } /// Creates a new DashMap with a capacity of 0 and the provided hasher. 
/// /// # Examples /// /// ``` /// use dashmap::DashMap; /// use std::collections::hash_map::RandomState; /// /// let s = RandomState::new(); /// let reviews = DashMap::with_hasher(s); /// reviews.insert("Veloren", "What a fantastic game!"); /// ``` pub fn with_hasher(hasher: S) -> Self { Self::with_capacity_and_hasher(0, hasher) } /// Creates a new DashMap with a specified starting capacity and hasher. /// /// # Examples /// /// ``` /// use dashmap::DashMap; /// use std::collections::hash_map::RandomState; /// /// let s = RandomState::new(); /// let mappings = DashMap::with_capacity_and_hasher(2, s); /// mappings.insert(2, 4); /// mappings.insert(8, 16); /// ``` pub fn with_capacity_and_hasher(mut capacity: usize, hasher: S) -> Self { let shard_amount = shard_amount(); let shift = util::ptr_size_bits() - ncb(shard_amount); if capacity != 0 { capacity = (capacity + (shard_amount - 1)) & !(shard_amount - 1); } let cps = capacity / shard_amount; let shards = (0..shard_amount) .map(|_| RwLock::new(HashMap::with_capacity_and_hasher(cps, hasher.clone()))) .collect(); Self { shift, shards, hasher, } } /// Hash a given item to produce a usize. /// Uses the provided or default HashBuilder. pub fn hash_usize<T: Hash>(&self, item: &T) -> usize { let mut hasher = self.hasher.build_hasher(); item.hash(&mut hasher); hasher.finish() as usize } cfg_if! { if #[cfg(feature = "raw-api")] { /// Allows you to peek at the inner shards that store your data. /// You should probably not use this unless you know what you are doing. /// /// Requires the `raw-api` feature to be enabled. /// /// # Examples /// /// ``` /// use dashmap::DashMap; /// /// let map = DashMap::<(), ()>::new(); /// println!("Amount of shards: {}", map.shards().len()); /// ``` pub fn shards(&self) -> &[RwLock<HashMap<K, V, S>>] { &self.shards } } else { #[allow(dead_code)] pub(crate) fn shards(&self) -> &[RwLock<HashMap<K, V, S>>] { &self.shards } } } cfg_if! { if #[cfg(feature = "raw-api")] { /// Finds which shard a certain key is stored in. /// You should probably not use this unless you know what you are doing. /// Note that shard selection is dependent on the default or provided HashBuilder. /// /// Requires the `raw-api` feature to be enabled. /// /// # Examples /// /// ``` /// use dashmap::DashMap; /// /// let map = DashMap::new(); /// map.insert("coca-cola", 1.4); /// println!("coca-cola is stored in shard: {}", map.determine_map("coca-cola")); /// ``` pub fn determine_map<Q>(&self, key: &Q) -> usize where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { let hash = self.hash_usize(&key); self.determine_shard(hash) } } } cfg_if! { if #[cfg(feature = "raw-api")] { /// Finds which shard a certain hash is stored in. /// /// Requires the `raw-api` feature to be enabled. /// /// # Examples /// /// ``` /// use dashmap::DashMap; /// /// let map: DashMap<i32, i32> = DashMap::new(); /// let key = "key"; /// let hash = map.hash_usize(&key); /// println!("hash is stored in shard: {}", map.determine_shard(hash)); /// ``` pub fn determine_shard(&self, hash: usize) -> usize { // Leave the high 7 bits for the HashBrown SIMD tag. (hash << 7) >> self.shift } } else { pub(crate) fn determine_shard(&self, hash: usize) -> usize { // Leave the high 7 bits for the HashBrown SIMD tag. (hash << 7) >> self.shift } } } /// Returns a reference to the map's [`BuildHasher`]. 
/// /// # Examples /// /// ```rust /// use dashmap::DashMap; /// use ahash::RandomState; /// /// let hasher = RandomState::new(); /// let map: DashMap<i32, i32> = DashMap::new(); /// let hasher: &RandomState = map.hasher(); /// ``` /// /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html pub fn hasher(&self) -> &S { &self.hasher } /// Inserts a key and a value into the map. /// /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. /// /// # Examples /// /// ``` /// use dashmap::DashMap; /// /// let map = DashMap::new(); /// map.insert("I am the key!", "And I am the value!"); /// ``` pub fn insert(&self, key: K, value: V) -> Option<V> { self._insert(key, value) } /// Removes an entry from the map, returning the key and value if they existed in the map. /// /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. /// /// # Examples /// /// ``` /// use dashmap::DashMap; /// /// let soccer_team = DashMap::new(); /// soccer_team.insert("Jack", "Goalie"); /// assert_eq!(soccer_team.remove("Jack").unwrap().1, "Goalie"); /// ``` pub fn remove<Q>(&self, key: &Q) -> Option<(K, V)> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { self._remove(key) } /// Removes an entry from the map, returning the key and value /// if the entry existed and the provided conditional function returned true. /// /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. /// /// ``` /// use dashmap::DashMap; /// /// let soccer_team = DashMap::new(); /// soccer_team.insert("Sam", "Forward"); /// soccer_team.remove_if("Sam", |_, position| position == &"Goalie"); /// assert!(soccer_team.contains_key("Sam")); /// ``` /// ``` /// use dashmap::DashMap; /// /// let soccer_team = DashMap::new(); /// soccer_team.insert("Sam", "Forward"); /// soccer_team.remove_if("Sam", |_, position| position == &"Forward"); /// assert!(!soccer_team.contains_key("Sam")); /// ``` pub fn remove_if<Q>(&self, key: &Q, f: impl FnOnce(&K, &V) -> bool) -> Option<(K, V)> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { self._remove_if(key, f) } /// Creates an iterator over a DashMap yielding immutable references. /// /// **Locking behaviour:** May deadlock if called when holding a mutable reference into the map. /// /// # Examples /// /// ``` /// use dashmap::DashMap; /// /// let words = DashMap::new(); /// words.insert("hello", "world"); /// assert_eq!(words.iter().count(), 1); /// ``` pub fn iter(&'a self) -> Iter<'a, K, V, S, DashMap<K, V, S>> { self._iter() } /// Iterator over a DashMap yielding mutable references. /// /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. /// /// # Examples /// /// ``` /// use dashmap::DashMap; /// /// let map = DashMap::new(); /// map.insert("Johnny", 21); /// map.iter_mut().for_each(|mut r| *r += 1); /// assert_eq!(*map.get("Johnny").unwrap(), 22); /// ``` pub fn iter_mut(&'a self) -> IterMut<'a, K, V, S, DashMap<K, V, S>> { self._iter_mut() } /// Get an immutable reference to an entry in the map /// /// **Locking behaviour:** May deadlock if called when holding a mutable reference into the map.
/// /// # Examples /// /// ``` /// use dashmap::DashMap; /// /// let youtubers = DashMap::new(); /// youtubers.insert("Bosnian Bill", 457000); /// assert_eq!(*youtubers.get("Bosnian Bill").unwrap(), 457000); /// ``` pub fn get<Q>(&'a self, key: &Q) -> Option<Ref<'a, K, V, S>> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { self._get(key) } /// Get a mutable reference to an entry in the map /// /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. /// /// # Examples /// /// ``` /// use dashmap::DashMap; /// /// let class = DashMap::new(); /// class.insert("Albin", 15); /// *class.get_mut("Albin").unwrap() -= 1; /// assert_eq!(*class.get("Albin").unwrap(), 14); /// ``` pub fn get_mut<Q>(&'a self, key: &Q) -> Option<RefMut<'a, K, V, S>> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { self._get_mut(key) } /// Remove excess capacity to reduce memory usage. /// /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. pub fn shrink_to_fit(&self) { self._shrink_to_fit(); } /// Retain elements whose predicate returns true /// and discard elements whose predicate returns false. /// /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. /// /// # Examples /// /// ``` /// use dashmap::DashMap; /// /// let people = DashMap::new(); /// people.insert("Albin", 15); /// people.insert("Jones", 22); /// people.insert("Charlie", 27); /// people.retain(|_, v| *v > 20); /// assert_eq!(people.len(), 2); /// ``` pub fn retain(&self, f: impl FnMut(&K, &mut V) -> bool) { self._retain(f); } /// Fetches the total number of key-value pairs stored in the map. /// /// **Locking behaviour:** May deadlock if called when holding a mutable reference into the map. /// /// # Examples /// /// ``` /// use dashmap::DashMap; /// /// let people = DashMap::new(); /// people.insert("Albin", 15); /// people.insert("Jones", 22); /// people.insert("Charlie", 27); /// assert_eq!(people.len(), 3); /// ``` pub fn len(&self) -> usize { self._len() } /// Checks if the map is empty or not. /// /// **Locking behaviour:** May deadlock if called when holding a mutable reference into the map. /// /// # Examples /// /// ``` /// use dashmap::DashMap; /// /// let map = DashMap::<(), ()>::new(); /// assert!(map.is_empty()); /// ``` pub fn is_empty(&self) -> bool { self._is_empty() } /// Removes all key-value pairs in the map. /// /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. /// /// # Examples /// /// ``` /// use dashmap::DashMap; /// /// let stats = DashMap::new(); /// stats.insert("Goals", 4); /// assert!(!stats.is_empty()); /// stats.clear(); /// assert!(stats.is_empty()); /// ``` pub fn clear(&self) { self._clear(); } /// Returns how many key-value pairs the map can store without reallocating. /// /// **Locking behaviour:** May deadlock if called when holding a mutable reference into the map. pub fn capacity(&self) -> usize { self._capacity() } /// Modify a specific value according to a function. /// /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map.
/// /// # Examples /// /// ``` /// use dashmap::DashMap; /// /// let stats = DashMap::new(); /// stats.insert("Goals", 4); /// stats.alter("Goals", |_, v| v * 2); /// assert_eq!(*stats.get("Goals").unwrap(), 8); /// ``` /// /// # Panics /// /// If the given closure panics, then `alter` will abort the process pub fn alter<Q>(&self, key: &Q, f: impl FnOnce(&K, V) -> V) where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { self._alter(key, f); } /// Modify every value in the map according to a function. /// /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. /// /// # Examples /// /// ``` /// use dashmap::DashMap; /// /// let stats = DashMap::new(); /// stats.insert("Wins", 4); /// stats.insert("Losses", 2); /// stats.alter_all(|_, v| v + 1); /// assert_eq!(*stats.get("Wins").unwrap(), 5); /// assert_eq!(*stats.get("Losses").unwrap(), 3); /// ``` /// /// # Panics /// /// If the given closure panics, then `alter_all` will abort the process pub fn alter_all(&self, f: impl FnMut(&K, V) -> V) { self._alter_all(f); } /// Checks if the map contains a specific key. /// /// **Locking behaviour:** May deadlock if called when holding a mutable reference into the map. /// /// # Examples /// /// ``` /// use dashmap::DashMap; /// /// let team_sizes = DashMap::new(); /// team_sizes.insert("Dakota Cherries", 23); /// assert!(team_sizes.contains_key("Dakota Cherries")); /// ``` pub fn contains_key<Q>(&self, key: &Q) -> bool where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { self._contains_key(key) } /// Advanced entry API that tries to mimic `std::collections::HashMap`. /// See the documentation on `dashmap::mapref::entry` for more details. /// /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map.
pub fn entry(&'a self, key: K) -> Entry<'a, K, V, S> { self._entry(key) } } impl<'a, K: 'a + Eq + Hash, V: 'a, S: 'a + BuildHasher + Clone> Map<'a, K, V, S> for DashMap<K, V, S> { fn _shard_count(&self) -> usize { self.shards.len() } unsafe fn _get_read_shard(&'a self, i: usize) -> &'a HashMap<K, V, S> { debug_assert!(i < self.shards.len()); self.shards.get_unchecked(i).get() } unsafe fn _yield_read_shard(&'a self, i: usize) -> RwLockReadGuard<'a, HashMap<K, V, S>> { debug_assert!(i < self.shards.len()); self.shards.get_unchecked(i).read() } unsafe fn _yield_write_shard(&'a self, i: usize) -> RwLockWriteGuard<'a, HashMap<K, V, S>> { debug_assert!(i < self.shards.len()); self.shards.get_unchecked(i).write() } fn _insert(&self, key: K, value: V) -> Option<V> { let hash = self.hash_usize(&key); let idx = self.determine_shard(hash); let mut shard = unsafe { self._yield_write_shard(idx) }; shard .insert(key, SharedValue::new(value)) .map(|v| v.into_inner()) } fn _remove<Q>(&self, key: &Q) -> Option<(K, V)> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { let hash = self.hash_usize(&key); let idx = self.determine_shard(hash); let mut shard = unsafe { self._yield_write_shard(idx) }; shard.remove_entry(key).map(|(k, v)| (k, v.into_inner())) } fn _remove_if<Q>(&self, key: &Q, f: impl FnOnce(&K, &V) -> bool) -> Option<(K, V)> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { let hash = self.hash_usize(&key); let idx = self.determine_shard(hash); let mut shard = unsafe { self._yield_write_shard(idx) }; if let Some((k, v)) = shard.get_key_value(key) { if f(k, v.get()) { shard.remove_entry(key).map(|(k, v)| (k, v.into_inner())) } else { None } } else { None } } fn _iter(&'a self) -> Iter<'a, K, V, S, DashMap<K, V, S>> { Iter::new(self) } fn _iter_mut(&'a self) -> IterMut<'a, K, V, S, DashMap<K, V, S>> { IterMut::new(self) } fn _get<Q>(&'a self, key: &Q) -> Option<Ref<'a, K, V, S>> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { let hash = self.hash_usize(&key); let idx = self.determine_shard(hash); let shard = unsafe { self._yield_read_shard(idx) }; if let Some((kptr, vptr)) = shard.get_key_value(key) { unsafe { let kptr = util::change_lifetime_const(kptr); let vptr = util::change_lifetime_const(vptr); Some(Ref::new(shard, kptr, vptr.get())) } } else { None } } fn _get_mut<Q>(&'a self, key: &Q) -> Option<RefMut<'a, K, V, S>> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { let hash = self.hash_usize(&key); let idx = self.determine_shard(hash); let shard = unsafe { self._yield_write_shard(idx) }; if let Some((kptr, vptr)) = shard.get_key_value(key) { unsafe { let kptr = util::change_lifetime_const(kptr); let vptr = &mut *vptr.as_ptr(); Some(RefMut::new(shard, kptr, vptr)) } } else { None } } fn _shrink_to_fit(&self) { self.shards.iter().for_each(|s| s.write().shrink_to_fit()); } fn _retain(&self, mut f: impl FnMut(&K, &mut V) -> bool) { self.shards .iter() .for_each(|s| s.write().retain(|k, v| f(k, v.get_mut()))); } fn _len(&self) -> usize { self.shards.iter().map(|s| s.read().len()).sum() } fn _capacity(&self) -> usize { self.shards.iter().map(|s| s.read().capacity()).sum() } fn _alter<Q>(&self, key: &Q, f: impl FnOnce(&K, V) -> V) where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { if let Some(mut r) = self.get_mut(key) { util::map_in_place_2(r.pair_mut(), f); } } fn _alter_all(&self, mut f: impl FnMut(&K, V) -> V) { self.shards.iter().for_each(|s| { s.write() .iter_mut() .for_each(|(k, v)| util::map_in_place_2((k, v.get_mut()), &mut f)); }); } fn _entry(&'a self, key: K) -> Entry<'a, K, V, S> { let hash = 
self.hash_usize(&key); let idx = self.determine_shard(hash); let shard = unsafe { self._yield_write_shard(idx) }; if let Some((kptr, vptr)) = shard.get_key_value(&key) { unsafe { let kptr = util::change_lifetime_const(kptr); let vptr = &mut *vptr.as_ptr(); Entry::Occupied(OccupiedEntry::new(shard, key, (kptr, vptr))) } } else { Entry::Vacant(VacantEntry::new(shard, key)) } } fn _hasher(&self) -> S { self.hasher.clone() } } impl<K: Eq + Hash + fmt::Debug, V: fmt::Debug, S: BuildHasher + Clone> fmt::Debug for DashMap<K, V, S> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut pmap = f.debug_map(); for r in self { let (k, v) = r.pair(); pmap.entry(k, v); } pmap.finish() } } impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone> Shl<(K, V)> for &'a DashMap<K, V, S> { type Output = Option<V>; fn shl(self, pair: (K, V)) -> Self::Output { self.insert(pair.0, pair.1) } } impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone, Q> Shr<&Q> for &'a DashMap<K, V, S> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { type Output = Ref<'a, K, V, S>; fn shr(self, key: &Q) -> Self::Output { self.get(key).unwrap() } } impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone, Q> BitOr<&Q> for &'a DashMap<K, V, S> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { type Output = RefMut<'a, K, V, S>; fn bitor(self, key: &Q) -> Self::Output { self.get_mut(key).unwrap() } } impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone, Q> Sub<&Q> for &'a DashMap<K, V, S> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { type Output = Option<(K, V)>; fn sub(self, key: &Q) -> Self::Output { self.remove(key) } } impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone, Q> BitAnd<&Q> for &'a DashMap<K, V, S> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { type Output = bool; fn bitand(self, key: &Q) -> Self::Output { self.contains_key(key) } } impl<'a, K: Eq + Hash, V, S: BuildHasher + Clone> IntoIterator for DashMap<K, V, S> { type Item = (K, V); type IntoIter = OwningIter<K, V, S>; fn into_iter(self) -> Self::IntoIter { OwningIter::new(self) } } impl<'a, K: Eq + Hash, V, S: BuildHasher + Clone> IntoIterator for &'a DashMap<K, V, S> { type Item = RefMulti<'a, K, V, S>; type IntoIter = Iter<'a, K, V, S, DashMap<K, V, S>>; fn into_iter(self) -> Self::IntoIter { self.iter() } } impl<K: Eq + Hash, V, S: BuildHasher + Clone> Extend<(K, V)> for DashMap<K, V, S> { fn extend<I: IntoIterator<Item = (K, V)>>(&mut self, intoiter: I) { for pair in intoiter.into_iter() { self.insert(pair.0, pair.1); } } } impl<K: Eq + Hash, V> FromIterator<(K, V)> for DashMap<K, V, RandomState> { fn from_iter<I: IntoIterator<Item = (K, V)>>(intoiter: I) -> Self { let mut map = DashMap::new(); map.extend(intoiter); map } } #[cfg(test)] mod tests { use crate::DashMap; use crate::mapref::one::{Ref, RefMut}; cfg_if::cfg_if! 
{ if #[cfg(feature = "no_std")] { use alloc::string::String; use ahash::RandomState; } else { use std::collections::hash_map::RandomState; } } #[test] fn test_basic() { let dm = DashMap::new(); dm.insert(0, 0); assert_eq!(dm.get(&0).unwrap().value(), &0); } #[test] fn test_default() { let dm: DashMap<u32, u32> = DashMap::default(); dm.insert(0, 0); assert_eq!(dm.get(&0).unwrap().value(), &0); } #[test] fn test_multiple_hashes() { let dm: DashMap<u32, u32> = DashMap::default(); for i in 0..100 { dm.insert(0, i); dm.insert(i, i); } for i in 1..100 { let r = dm.get(&i).unwrap(); assert_eq!(i, *r.value()); assert_eq!(i, *r.key()); } let r = dm.get(&0).unwrap(); assert_eq!(99, *r.value()); } #[test] fn test_more_complex_values() { #[derive(Hash, PartialEq, Debug, Clone)] struct T0 { s: String, u: u8, } let dm = DashMap::new(); let range = 0..10; for i in range { let t = T0 { s: i.to_string(), u: i as u8, }; dm.insert(i, t.clone()); assert_eq!(&t, dm.get(&i).unwrap().value()); } } #[test] fn test_different_hashers_randomstate() { let dm_hm_default: DashMap<u32, u32, RandomState> = DashMap::with_hasher(RandomState::new()); for i in 0..10 { dm_hm_default.insert(i, i); assert_eq!(i, *dm_hm_default.get(&i).unwrap().value()); } } #[test] fn test_ref_map() { let dm = DashMap::new(); dm.insert(0, [0]); { let item = Ref::map(dm.get(&0).unwrap(), |v| &v[0]); assert_eq!(item.value(), &0); } { let item = RefMut::map(dm.get_mut(&0).unwrap(), |v| &mut v[0]); assert_eq!(item.value(), &0); } } }
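A usage sketch for the map defined above, exercising only methods shown here (`insert`, `entry`, `get`, `len`); the `or_insert` call assumes the usual entry API from the `mapref::entry` module imported at the top:

```rust
use std::sync::Arc;
use std::thread;
use dashmap::DashMap;

fn main() {
    let map = Arc::new(DashMap::new());
    let handles: Vec<_> = (0..4)
        .map(|t| {
            let map = Arc::clone(&map);
            thread::spawn(move || {
                for i in 0..100 {
                    // All methods take &self, so no external lock is needed.
                    map.insert((t, i), i);
                }
                // Entry API mirrors std::collections::HashMap; key (t, 0) already
                // exists, so or_insert returns the stored 0 and we bump it to 1.
                *map.entry((t, 0)).or_insert(0) += 1;
            })
        })
        .collect();
    for h in handles {
        h.join().unwrap();
    }
    assert_eq!(map.len(), 400);
    assert_eq!(*map.get(&(0, 0)).unwrap(), 1);
}
```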
26.564272
118
0.516705
f59f1160e703e4bd82eec71654567c737666ee61
736
// edition:2018 #![allow(non_camel_case_types)] #![feature(async_await, await_macro)] mod outer_mod { pub mod await { //~ ERROR expected identifier, found reserved keyword `await` pub struct await; //~ ERROR expected identifier, found reserved keyword `await` } } use self::outer_mod::await::await; //~ ERROR expected identifier, found reserved keyword `await` //~^ ERROR expected identifier, found reserved keyword `await` struct Foo { await: () } //~^ ERROR expected identifier, found reserved keyword `await` impl Foo { fn await() {} } //~^ ERROR expected identifier, found reserved keyword `await` macro_rules! await { //~^ ERROR expected identifier, found reserved keyword `await` () => {} } fn main() {}
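For contrast, a sketch of what does compile on the 2018 edition: raw identifiers let the same names through the parser (the `macro_rules!` case is left out, since raw macro names have their own restrictions):

```rust
// edition:2018 — the reserved keyword is accepted when spelled r#await.
#![allow(non_camel_case_types)]

mod outer_mod {
    pub mod r#await {
        pub struct r#await;
    }
}
use self::outer_mod::r#await::r#await;

struct Foo { r#await: () }

impl Foo { fn r#await() {} }

fn main() {
    let _unit_struct = r#await;
    let _foo = Foo { r#await: () };
    Foo::r#await();
}
```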
28.307692
96
0.69837
14882c078369d208c45be5d3eebde202a38d577f
2,837
// Copyright 2021 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::any::Any; use std::sync::Arc; use common_datablocks::DataBlock; use common_datavalues::prelude::*; use common_exception::Result; use common_meta_types::TableIdent; use common_meta_types::TableInfo; use common_meta_types::TableMeta; use common_planners::ReadDataSourcePlan; use common_streams::DataBlockStream; use common_streams::SendableDataBlockStream; use crate::catalogs::Catalog; use crate::sessions::QueryContext; use crate::storages::Table; pub struct EnginesTable { table_info: TableInfo, } impl EnginesTable { pub fn create(table_id: u64) -> Self { let schema = DataSchemaRefExt::create(vec![ DataField::new("Engine", Vu8::to_data_type()), DataField::new("Comment", Vu8::to_data_type()), ]); let table_info = TableInfo { desc: "'system'.'engines'".to_string(), name: "engines".to_string(), ident: TableIdent::new(table_id, 0), meta: TableMeta { schema, engine: "SystemEngines".to_string(), ..Default::default() }, }; EnginesTable { table_info } } } #[async_trait::async_trait] impl Table for EnginesTable { fn as_any(&self) -> &dyn Any { self } fn get_table_info(&self) -> &TableInfo { &self.table_info } async fn read( &self, ctx: Arc<QueryContext>, _plan: &ReadDataSourcePlan, ) -> Result<SendableDataBlockStream> { let table_engine_descriptors = ctx.get_catalog().get_table_engines(); let mut engine_name = Vec::with_capacity(table_engine_descriptors.len()); let mut engine_comment = Vec::with_capacity(table_engine_descriptors.len()); for descriptor in &table_engine_descriptors { engine_name.push(descriptor.engine_name.clone()); engine_comment.push(descriptor.comment.clone()); } let block = DataBlock::create(self.table_info.schema(), vec![ Series::from_data(engine_name), Series::from_data(engine_comment), ]); Ok(Box::pin(DataBlockStream::create( self.table_info.schema(), None, vec![block], ))) } }
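The `read` body follows a common system-table pattern: one pass over the descriptors, one Vec per output column. A plain-Rust sketch with a hypothetical stand-in for the catalog's descriptor type:

```rust
// Hypothetical descriptor; the real type comes from the catalog.
struct TableEngineDescriptor {
    engine_name: String,
    comment: String,
}

fn engine_columns(descriptors: &[TableEngineDescriptor]) -> (Vec<String>, Vec<String>) {
    // Equivalent to the loop in read(); a single map + unzip builds both columns.
    descriptors
        .iter()
        .map(|d| (d.engine_name.clone(), d.comment.clone()))
        .unzip()
}
```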
31.522222
84
0.647515
909e093ed3f903d7bfa4d3adbed845fd883ddc78
2,247
// Copyright 2022 TiKV Project Authors. Licensed under Apache-2.0. #![feature(derive_default_enum)] #![feature(array_from_fn)] mod metrics; mod slab; mod tls; use kvproto::kvrpcpb as pb; pub use self::{ slab::{TrackerToken, GLOBAL_TRACKERS, INVALID_TRACKER_TOKEN}, tls::*, }; #[derive(Debug)] pub struct Tracker { pub req_info: RequestInfo, pub metrics: RequestMetrics, // TODO: Add request stage info // pub current_stage: RequestStage, } impl Tracker { pub fn new(req_info: RequestInfo) -> Self { Self { req_info, metrics: Default::default(), } } pub fn write_scan_detail(&self, detail_v2: &mut pb::ScanDetailV2) { detail_v2.set_rocksdb_block_read_byte(self.metrics.block_read_byte); detail_v2.set_rocksdb_block_read_count(self.metrics.block_read_count); detail_v2.set_rocksdb_block_cache_hit_count(self.metrics.block_cache_hit_count); detail_v2.set_rocksdb_key_skipped_count(self.metrics.internal_key_skipped_count); detail_v2.set_rocksdb_delete_skipped_count(self.metrics.deleted_key_skipped_count); } } #[derive(Debug, Default)] pub struct RequestInfo { pub region_id: u64, pub start_ts: u64, pub task_id: u64, pub resource_group_tag: Vec<u8>, pub request_type: RequestType, } impl RequestInfo { pub fn new(ctx: &pb::Context, request_type: RequestType, start_ts: u64) -> RequestInfo { RequestInfo { region_id: ctx.get_region_id(), start_ts, task_id: ctx.get_task_id(), resource_group_tag: ctx.get_resource_group_tag().to_vec(), request_type, } } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub enum RequestType { #[default] Unknown, KvGet, KvBatchGet, KvBatchGetCommand, KvScan, KvScanLock, CoprocessorDag, CoprocessorAnalyze, CoprocessorChecksum, } #[derive(Debug, Default, Clone)] pub struct RequestMetrics { pub get_snapshot_nanos: u64, pub block_cache_hit_count: u64, pub block_read_count: u64, pub block_read_byte: u64, pub block_read_nanos: u64, pub internal_key_skipped_count: u64, pub deleted_key_skipped_count: u64, }
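A construction sketch using only the types above (the snippet assumes it lives in this crate); the field values are made up, and the `ScanDetailV2` getter follows the usual kvproto generated-code naming:

```rust
use kvproto::kvrpcpb as pb;

fn main() {
    // Track a hypothetical KvGet request; remaining fields come from Default.
    let info = RequestInfo {
        region_id: 1,
        start_ts: 42,
        request_type: RequestType::KvGet,
        ..Default::default()
    };
    let mut tracker = Tracker::new(info);

    // Normally filled in by the storage layer while the request runs.
    tracker.metrics.block_read_count = 3;
    tracker.metrics.block_read_byte = 12 * 1024;

    // Export the collected metrics into the protobuf response detail.
    let mut detail = pb::ScanDetailV2::default();
    tracker.write_scan_detail(&mut detail);
    assert_eq!(detail.get_rocksdb_block_read_count(), 3);
}
```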
25.827586
92
0.686248
030b0ad6aadb5ad16eddc3e093c2de775002cb2b
676
use crate::rational::Rational64; use std::any::Any; use std::sync::Arc; /// Timestamp information for frames and packets. #[derive(Debug, Clone, Default)] pub struct TimeInfo { /// Presentation timestamp. pub pts: Option<i64>, /// Decode timestamp. pub dts: Option<i64>, /// Duration (in timebase units). pub duration: Option<u64>, /// Timebase numerator/denominator (e.g., 1/75th of a second). /// /// Its value does not vary among frames/packets, since it is /// computed and defined at stream level. pub timebase: Option<Rational64>, /// Timebase user private data. pub user_private: Option<Arc<dyn Any + Send + Sync>>, }
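A construction sketch; `Rational64::new` is assumed to be the usual num-rational constructor behind the `crate::rational` re-export:

```rust
fn main() {
    let info = TimeInfo {
        pts: Some(1500),
        dts: Some(1500),
        duration: Some(20),
        timebase: Some(Rational64::new(1, 75)), // 75 ticks per second
        user_private: None,
    };
    assert_eq!(info.pts, Some(1500));

    // The derived Default yields all-None fields.
    let empty = TimeInfo::default();
    assert!(empty.timebase.is_none());
}
```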
30.727273
65
0.662722
76a360e0984764c17cca6a254d618810dc7e00e9
4,589
use timely::dataflow::*; use timely::dataflow::operators::*; use timely::Data; mod index; mod extender; mod intersector; pub use self::index::Index; pub use self::extender::IndexStream; pub use self::intersector::IntersectOnly; //use ::Indexable; /// Functionality used by GenericJoin to extend prefixes with new attributes. /// /// These methods are used in `GenericJoin`'s `extend` method, and may not be broadly useful elsewhere. pub trait StreamPrefixExtender<G: Scope, W: Data> { /// The type of data to extend. type Prefix: Data; /// The type of the extensions. type Extension: Data; /// Updates each prefix with an upper bound on the number of extensions for this relation. fn count(&self, Stream<G, (Self::Prefix, u64, u64, W)>, u64) -> Stream<G, (Self::Prefix, u64, u64, W)>; /// Proposes each extension from this relation. fn propose(&self, Stream<G, (Self::Prefix, W)>) -> Stream<G, (Self::Prefix, Vec<Self::Extension>, W)>; /// Restricts proposals by those this relation would propose. fn intersect(&self, Stream<G, (Self::Prefix, Vec<Self::Extension>, W)>) -> Stream<G, (Self::Prefix, Vec<Self::Extension>, W)>; } /// Extension method for generic join functionality. pub trait GenericJoin<G:Scope, P:Data, W: Data> { /// Extends a stream of prefixes using the supplied prefix extenders. fn extend<'a, E: Data>(&self, extenders: Vec<Box<StreamPrefixExtender<G, W, Prefix=P, Extension=E>+'a>>) -> Stream<G, (P, Vec<E>, W)>; } pub trait StreamPrefixIntersector<G:Scope> { type Prefix: Data; fn intersect_only(&self, Stream<G, (Self::Prefix, i32)>) -> Stream<G, (Self::Prefix, i32)>; } pub trait Intersection<G:Scope, P:Data> { fn intersect_only<'a>(&self, intersectors: Vec<Box<StreamPrefixIntersector<G, Prefix=P>+'a>>) -> Stream<G, (P, i32)>; } impl<G: Scope, P:Data> Intersection<G, P> for Stream<G, (P, i32)> { fn intersect_only<'a>(&self, intersectors: Vec<Box<StreamPrefixIntersector<G, Prefix=P> + 'a>>) -> Stream<G, (P, i32)> { let mut results = self.clone(); for intersector in intersectors { results = intersector.intersect_only(results); } results } } // A layer of GenericJoin, in which a collection of prefixes are extended by one attribute impl<G: Scope, P:Data, W: Data> GenericJoin<G, P, W> for Stream<G, (P, W)> { fn extend<'a, E>(&self, extenders: Vec<Box<StreamPrefixExtender<G, W, Prefix=P, Extension=E>+'a>>) -> Stream<G, (P, Vec<E>, W)> where E: Data { if extenders.len() == 1 { extenders[0].propose(self.clone()) } else { let mut counts = self.map(|(p,s)| (p, 1 << 31, 0, s)); for (index,extender) in extenders.iter().enumerate() { counts = extender.count(counts, index as u64); } let parts = counts.partition(extenders.len() as u64, |(p, _, i, w)| (i, (p, w))); let mut results = Vec::new(); for (index, nominations) in parts.into_iter().enumerate() { let mut extensions = extenders[index].propose(nominations); for other in (0..extenders.len()).filter(|&x| x != index) { extensions = extenders[other].intersect(extensions); } results.push(extensions); // save extensions } self.scope().concatenate(results).map(|(p,es,w)| (p,es,w)) } } } /// Reports the number of elements satisfying the predicate. /// /// This method *relies strongly* on the assumption that the predicate /// stays false once it becomes false, a joint property of the predicate /// and the slice. This allows `advance` to use exponential search to /// count the number of elements in time logarithmic in the result.
// #[inline(never)] pub fn advance<T, F: Fn(&T)->bool>(slice: &[T], function: F) -> usize { // start with no advance let mut index = 0; if index < slice.len() && function(&slice[index]) { // advance in exponentially growing steps. let mut step = 1; while index + step < slice.len() && function(&slice[index + step]) { index += step; step = step << 1; } // advance in exponentially shrinking steps. step = step >> 1; while step > 0 { if index + step < slice.len() && function(&slice[index + step]) { index += step; } step = step >> 1; } index += 1; } index }
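A quick check of `advance`: the predicate `x < 5` is monotone over the sorted slice (true, then false forever), which is exactly the precondition the doc comment relies on for the exponential (galloping) search:

```rust
fn main() {
    let slice = [1, 2, 4, 4, 5, 8];
    // Four elements are strictly below 5.
    assert_eq!(advance(&slice, |&x| x < 5), 4);
    // Degenerate cases: nothing matches, everything matches.
    assert_eq!(advance(&slice, |&x| x < 0), 0);
    assert_eq!(advance(&slice, |&x| x < 100), slice.len());
}
```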
37.008065
131
0.59817
cc713e5ae138eedb5185515107977d3248e868ec
25,831
#![allow(unused_imports, non_camel_case_types)] use crate::model::CodeableConcept::CodeableConcept; use crate::model::DocumentReference_Content::DocumentReference_Content; use crate::model::DocumentReference_Context::DocumentReference_Context; use crate::model::DocumentReference_RelatesTo::DocumentReference_RelatesTo; use crate::model::Element::Element; use crate::model::Extension::Extension; use crate::model::Identifier::Identifier; use crate::model::Meta::Meta; use crate::model::Narrative::Narrative; use crate::model::Reference::Reference; use crate::model::ResourceList::ResourceList; use serde_json::json; use serde_json::value::Value; use std::borrow::Cow; /// A reference to a document of any kind for any purpose. Provides metadata about /// the document so that the document can be discovered and managed. The scope of a /// document is any serialized object with a mime-type, so includes formal patient /// centric documents (CDA), clinical notes, scanned paper, and non-patient specific /// documents like policy text. #[derive(Debug)] pub struct DocumentReference<'a> { pub(crate) value: Cow<'a, Value>, } impl DocumentReference<'_> { pub fn new(value: &Value) -> DocumentReference { DocumentReference { value: Cow::Borrowed(value), } } pub fn to_json(&self) -> Value { (*self.value).clone() } /// Extensions for date pub fn _date(&self) -> Option<Element> { if let Some(val) = self.value.get("_date") { return Some(Element { value: Cow::Borrowed(val), }); } return None; } /// Extensions for description pub fn _description(&self) -> Option<Element> { if let Some(val) = self.value.get("_description") { return Some(Element { value: Cow::Borrowed(val), }); } return None; } /// Extensions for docStatus pub fn _doc_status(&self) -> Option<Element> { if let Some(val) = self.value.get("_docStatus") { return Some(Element { value: Cow::Borrowed(val), }); } return None; } /// Extensions for implicitRules pub fn _implicit_rules(&self) -> Option<Element> { if let Some(val) = self.value.get("_implicitRules") { return Some(Element { value: Cow::Borrowed(val), }); } return None; } /// Extensions for language pub fn _language(&self) -> Option<Element> { if let Some(val) = self.value.get("_language") { return Some(Element { value: Cow::Borrowed(val), }); } return None; } /// Extensions for status pub fn _status(&self) -> Option<Element> { if let Some(val) = self.value.get("_status") { return Some(Element { value: Cow::Borrowed(val), }); } return None; } /// Which person or organization authenticates that this document is valid. pub fn authenticator(&self) -> Option<Reference> { if let Some(val) = self.value.get("authenticator") { return Some(Reference { value: Cow::Borrowed(val), }); } return None; } /// Identifies who is responsible for adding the information to the document. pub fn author(&self) -> Option<Vec<Reference>> { if let Some(Value::Array(val)) = self.value.get("author") { return Some( val.into_iter() .map(|e| Reference { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// A categorization for the type of document referenced - helps for indexing and /// searching. This may be implied by or derived from the code specified in the /// DocumentReference.type.
pub fn category(&self) -> Option<Vec<CodeableConcept>> { if let Some(Value::Array(val)) = self.value.get("category") { return Some( val.into_iter() .map(|e| CodeableConcept { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// These resources do not have an independent existence apart from the resource /// that contains them - they cannot be identified independently, and nor can they /// have their own independent transaction scope. pub fn contained(&self) -> Option<Vec<ResourceList>> { if let Some(Value::Array(val)) = self.value.get("contained") { return Some( val.into_iter() .map(|e| ResourceList { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// The document and format referenced. There may be multiple content element /// repetitions, each with a different format. pub fn content(&self) -> Vec<DocumentReference_Content> { self.value .get("content") .unwrap() .as_array() .unwrap() .into_iter() .map(|e| DocumentReference_Content { value: Cow::Borrowed(e), }) .collect::<Vec<_>>() } /// The clinical context in which the document was prepared. pub fn context(&self) -> Option<DocumentReference_Context> { if let Some(val) = self.value.get("context") { return Some(DocumentReference_Context { value: Cow::Borrowed(val), }); } return None; } /// Identifies the organization or group who is responsible for ongoing maintenance /// of and access to the document. pub fn custodian(&self) -> Option<Reference> { if let Some(val) = self.value.get("custodian") { return Some(Reference { value: Cow::Borrowed(val), }); } return None; } /// When the document reference was created. pub fn date(&self) -> Option<&str> { if let Some(Value::String(string)) = self.value.get("date") { return Some(string); } return None; } /// Human-readable description of the source document. pub fn description(&self) -> Option<&str> { if let Some(Value::String(string)) = self.value.get("description") { return Some(string); } return None; } /// The status of the underlying document. pub fn doc_status(&self) -> Option<&str> { if let Some(Value::String(string)) = self.value.get("docStatus") { return Some(string); } return None; } /// May be used to represent additional information that is not part of the basic /// definition of the resource. To make the use of extensions safe and manageable, /// there is a strict set of governance applied to the definition and use of /// extensions. Though any implementer can define an extension, there is a set of /// requirements that SHALL be met as part of the definition of the extension. pub fn extension(&self) -> Option<Vec<Extension>> { if let Some(Value::Array(val)) = self.value.get("extension") { return Some( val.into_iter() .map(|e| Extension { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// The logical id of the resource, as used in the URL for the resource. Once /// assigned, this value never changes. pub fn id(&self) -> Option<&str> { if let Some(Value::String(string)) = self.value.get("id") { return Some(string); } return None; } /// Other identifiers associated with the document, including version independent /// identifiers. pub fn identifier(&self) -> Option<Vec<Identifier>> { if let Some(Value::Array(val)) = self.value.get("identifier") { return Some( val.into_iter() .map(|e| Identifier { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// A reference to a set of rules that were followed when the resource was /// constructed, and which must be understood when processing the content. 
Often, /// this is a reference to an implementation guide that defines the special rules /// along with other profiles etc. pub fn implicit_rules(&self) -> Option<&str> { if let Some(Value::String(string)) = self.value.get("implicitRules") { return Some(string); } return None; } /// The base language in which the resource is written. pub fn language(&self) -> Option<&str> { if let Some(Value::String(string)) = self.value.get("language") { return Some(string); } return None; } /// Document identifier as assigned by the source of the document. This identifier /// is specific to this version of the document. This unique identifier may be used /// elsewhere to identify this version of the document. pub fn master_identifier(&self) -> Option<Identifier> { if let Some(val) = self.value.get("masterIdentifier") { return Some(Identifier { value: Cow::Borrowed(val), }); } return None; } /// The metadata about the resource. This is content that is maintained by the /// infrastructure. Changes to the content might not always be associated with /// version changes to the resource. pub fn meta(&self) -> Option<Meta> { if let Some(val) = self.value.get("meta") { return Some(Meta { value: Cow::Borrowed(val), }); } return None; } /// May be used to represent additional information that is not part of the basic /// definition of the resource and that modifies the understanding of the element /// that contains it and/or the understanding of the containing element's /// descendants. Usually modifier elements provide negation or qualification. To /// make the use of extensions safe and manageable, there is a strict set of /// governance applied to the definition and use of extensions. Though any /// implementer is allowed to define an extension, there is a set of requirements /// that SHALL be met as part of the definition of the extension. Applications /// processing a resource are required to check for modifier extensions. Modifier /// extensions SHALL NOT change the meaning of any elements on Resource or /// DomainResource (including cannot change the meaning of modifierExtension /// itself). pub fn modifier_extension(&self) -> Option<Vec<Extension>> { if let Some(Value::Array(val)) = self.value.get("modifierExtension") { return Some( val.into_iter() .map(|e| Extension { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// Relationships that this document has with other document references that already /// exist. pub fn relates_to(&self) -> Option<Vec<DocumentReference_RelatesTo>> { if let Some(Value::Array(val)) = self.value.get("relatesTo") { return Some( val.into_iter() .map(|e| DocumentReference_RelatesTo { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// A set of Security-Tag codes specifying the level of privacy/security of the /// Document. Note that DocumentReference.meta.security contains the security labels /// of the "reference" to the document, while DocumentReference.securityLabel /// contains a snapshot of the security labels on the document the reference refers /// to. pub fn security_label(&self) -> Option<Vec<CodeableConcept>> { if let Some(Value::Array(val)) = self.value.get("securityLabel") { return Some( val.into_iter() .map(|e| CodeableConcept { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// The status of this document reference. 
pub fn status(&self) -> Option<DocumentReferenceStatus> { if let Some(Value::String(val)) = self.value.get("status") { return Some(DocumentReferenceStatus::from_string(&val).unwrap()); } return None; } /// Who or what the document is about. The document can be about a person, (patient /// or healthcare practitioner), a device (e.g. a machine) or even a group of /// subjects (such as a document about a herd of farm animals, or a set of patients /// that share a common exposure). pub fn subject(&self) -> Option<Reference> { if let Some(val) = self.value.get("subject") { return Some(Reference { value: Cow::Borrowed(val), }); } return None; } /// A human-readable narrative that contains a summary of the resource and can be /// used to represent the content of the resource to a human. The narrative need not /// encode all the structured data, but is required to contain sufficient detail to /// make it "clinically safe" for a human to just read the narrative. Resource /// definitions may define what content should be represented in the narrative to /// ensure clinical safety. pub fn text(&self) -> Option<Narrative> { if let Some(val) = self.value.get("text") { return Some(Narrative { value: Cow::Borrowed(val), }); } return None; } /// Specifies the particular kind of document referenced (e.g. History and /// Physical, Discharge Summary, Progress Note). This usually equates to the purpose /// of making the document referenced. pub fn fhir_type(&self) -> Option<CodeableConcept> { if let Some(val) = self.value.get("type") { return Some(CodeableConcept { value: Cow::Borrowed(val), }); } return None; } pub fn validate(&self) -> bool { if let Some(_val) = self._date() { if !_val.validate() { return false; } } if let Some(_val) = self._description() { if !_val.validate() { return false; } } if let Some(_val) = self._doc_status() { if !_val.validate() { return false; } } if let Some(_val) = self._implicit_rules() { if !_val.validate() { return false; } } if let Some(_val) = self._language() { if !_val.validate() { return false; } } if let Some(_val) = self._status() { if !_val.validate() { return false; } } if let Some(_val) = self.authenticator() { if !_val.validate() { return false; } } if let Some(_val) = self.author() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.category() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.contained() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if !self .content() .into_iter() .map(|e| e.validate()) .all(|x| x == true) { return false; } if let Some(_val) = self.context() { if !_val.validate() { return false; } } if let Some(_val) = self.custodian() { if !_val.validate() { return false; } } if let Some(_val) = self.date() {} if let Some(_val) = self.description() {} if let Some(_val) = self.doc_status() {} if let Some(_val) = self.extension() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.id() {} if let Some(_val) = self.identifier() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.implicit_rules() {} if let Some(_val) = self.language() {} if let Some(_val) = self.master_identifier() { if !_val.validate() { return false; } } if let Some(_val) = self.meta() { if !_val.validate() { return false; } } if let Some(_val) = self.modifier_extension() { if !_val.into_iter().map(|e| 
e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.relates_to() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.security_label() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.status() {} if let Some(_val) = self.subject() { if !_val.validate() { return false; } } if let Some(_val) = self.text() { if !_val.validate() { return false; } } if let Some(_val) = self.fhir_type() { if !_val.validate() { return false; } } return true; } } #[derive(Debug)] pub struct DocumentReferenceBuilder { pub(crate) value: Value, } impl DocumentReferenceBuilder { pub fn build(&self) -> DocumentReference { DocumentReference { value: Cow::Owned(self.value.clone()), } } pub fn with(existing: DocumentReference) -> DocumentReferenceBuilder { DocumentReferenceBuilder { value: (*existing.value).clone(), } } pub fn new(content: Vec<DocumentReference_Content>) -> DocumentReferenceBuilder { let mut __value: Value = json!({}); __value["content"] = json!(content.into_iter().map(|e| e.value).collect::<Vec<_>>()); return DocumentReferenceBuilder { value: __value }; } pub fn _date<'a>(&'a mut self, val: Element) -> &'a mut DocumentReferenceBuilder { self.value["_date"] = json!(val.value); return self; } pub fn _description<'a>(&'a mut self, val: Element) -> &'a mut DocumentReferenceBuilder { self.value["_description"] = json!(val.value); return self; } pub fn _doc_status<'a>(&'a mut self, val: Element) -> &'a mut DocumentReferenceBuilder { self.value["_docStatus"] = json!(val.value); return self; } pub fn _implicit_rules<'a>(&'a mut self, val: Element) -> &'a mut DocumentReferenceBuilder { self.value["_implicitRules"] = json!(val.value); return self; } pub fn _language<'a>(&'a mut self, val: Element) -> &'a mut DocumentReferenceBuilder { self.value["_language"] = json!(val.value); return self; } pub fn _status<'a>(&'a mut self, val: Element) -> &'a mut DocumentReferenceBuilder { self.value["_status"] = json!(val.value); return self; } pub fn authenticator<'a>(&'a mut self, val: Reference) -> &'a mut DocumentReferenceBuilder { self.value["authenticator"] = json!(val.value); return self; } pub fn author<'a>(&'a mut self, val: Vec<Reference>) -> &'a mut DocumentReferenceBuilder { self.value["author"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn category<'a>( &'a mut self, val: Vec<CodeableConcept>, ) -> &'a mut DocumentReferenceBuilder { self.value["category"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn contained<'a>(&'a mut self, val: Vec<ResourceList>) -> &'a mut DocumentReferenceBuilder { self.value["contained"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn context<'a>( &'a mut self, val: DocumentReference_Context, ) -> &'a mut DocumentReferenceBuilder { self.value["context"] = json!(val.value); return self; } pub fn custodian<'a>(&'a mut self, val: Reference) -> &'a mut DocumentReferenceBuilder { self.value["custodian"] = json!(val.value); return self; } pub fn date<'a>(&'a mut self, val: &str) -> &'a mut DocumentReferenceBuilder { self.value["date"] = json!(val); return self; } pub fn description<'a>(&'a mut self, val: &str) -> &'a mut DocumentReferenceBuilder { self.value["description"] = json!(val); return self; } pub fn doc_status<'a>(&'a mut self, val: &str) -> &'a mut DocumentReferenceBuilder { self.value["docStatus"] = json!(val); return self; } pub fn 
extension<'a>(&'a mut self, val: Vec<Extension>) -> &'a mut DocumentReferenceBuilder { self.value["extension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn id<'a>(&'a mut self, val: &str) -> &'a mut DocumentReferenceBuilder { self.value["id"] = json!(val); return self; } pub fn identifier<'a>(&'a mut self, val: Vec<Identifier>) -> &'a mut DocumentReferenceBuilder { self.value["identifier"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn implicit_rules<'a>(&'a mut self, val: &str) -> &'a mut DocumentReferenceBuilder { self.value["implicitRules"] = json!(val); return self; } pub fn language<'a>(&'a mut self, val: &str) -> &'a mut DocumentReferenceBuilder { self.value["language"] = json!(val); return self; } pub fn master_identifier<'a>( &'a mut self, val: Identifier, ) -> &'a mut DocumentReferenceBuilder { self.value["masterIdentifier"] = json!(val.value); return self; } pub fn meta<'a>(&'a mut self, val: Meta) -> &'a mut DocumentReferenceBuilder { self.value["meta"] = json!(val.value); return self; } pub fn modifier_extension<'a>( &'a mut self, val: Vec<Extension>, ) -> &'a mut DocumentReferenceBuilder { self.value["modifierExtension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn relates_to<'a>( &'a mut self, val: Vec<DocumentReference_RelatesTo>, ) -> &'a mut DocumentReferenceBuilder { self.value["relatesTo"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn security_label<'a>( &'a mut self, val: Vec<CodeableConcept>, ) -> &'a mut DocumentReferenceBuilder { self.value["securityLabel"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn status<'a>( &'a mut self, val: DocumentReferenceStatus, ) -> &'a mut DocumentReferenceBuilder { self.value["status"] = json!(val.to_string()); return self; } pub fn subject<'a>(&'a mut self, val: Reference) -> &'a mut DocumentReferenceBuilder { self.value["subject"] = json!(val.value); return self; } pub fn text<'a>(&'a mut self, val: Narrative) -> &'a mut DocumentReferenceBuilder { self.value["text"] = json!(val.value); return self; } pub fn fhir_type<'a>(&'a mut self, val: CodeableConcept) -> &'a mut DocumentReferenceBuilder { self.value["type"] = json!(val.value); return self; } } #[derive(Debug)] pub enum DocumentReferenceStatus { Current, Superseded, EnteredInError, } impl DocumentReferenceStatus { pub fn from_string(string: &str) -> Option<DocumentReferenceStatus> { match string { "current" => Some(DocumentReferenceStatus::Current), "superseded" => Some(DocumentReferenceStatus::Superseded), "entered-in-error" => Some(DocumentReferenceStatus::EnteredInError), _ => None, } } pub fn to_string(&self) -> String { match self { DocumentReferenceStatus::Current => "current".to_string(), DocumentReferenceStatus::Superseded => "superseded".to_string(), DocumentReferenceStatus::EnteredInError => "entered-in-error".to_string(), } } }
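A read-side sketch driven through the accessors above; the JSON payload is a made-up minimal document (`content` is the one field the struct requires):

```rust
use serde_json::json;

fn main() {
    let value = json!({
        "content": [],
        "status": "current",
        "date": "2021-06-01T00:00:00Z",
        "description": "Discharge summary"
    });
    let doc = DocumentReference::new(&value);

    assert_eq!(doc.date(), Some("2021-06-01T00:00:00Z"));
    assert!(matches!(doc.status(), Some(DocumentReferenceStatus::Current)));
    assert!(doc.validate());
}
```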
34.258621
100
0.552553
87f5e55eb9eaaaee2d8a3c5bd411c7e3f6953af0
953
//! A lot of applications and programming language implementations have been
//! recently found to be vulnerable to denial-of-service attacks when a hash
//! function with weak security guarantees, like Murmurhash 3, was used to
//! construct a hash table.
//!
//! In order to address this, Sodium provides the `shorthash()` function.
//! This very fast hash function outputs short, but unpredictable
//! (without knowing the secret key) values suitable for picking a list in
//! a hash table for a given key.
//!
//! # Selected primitive
//! `shorthash()` is currently an implementation of `SipHash-2-4` as specified in
//! [SipHash: a fast short-input PRF](https://131002.net/siphash/)
//!
//! # Example
//! ```
//! use exonum_sodiumoxide::crypto::shorthash;
//!
//! let key = shorthash::gen_key();
//! let data_to_hash = b"some data";
//! let digest = shorthash::shorthash(data_to_hash, &key);
//! ```
pub use self::siphash24::*;

pub mod siphash24;
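// A determinism sketch for the API documented above (illustrative; it
// assumes, as the doc example does, the `exonum_sodiumoxide` re-export and
// that `Digest` compares by value, which the crate's newtypes normally do).
// The same key and input always produce the same short digest, while the
// secret key keeps the mapping unpredictable to an attacker.
use exonum_sodiumoxide::crypto::shorthash;

fn main() {
    let key = shorthash::gen_key();
    let a = shorthash::shorthash(b"bucket-key", &key);
    let b = shorthash::shorthash(b"bucket-key", &key);
    assert_eq!(a, b); // deterministic for a fixed key
}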
38.12
81
0.70724
48da43bfcf417bd786f2c4b3c793ad471b306fbf
743
use eosio_numstr::symbol_from_bytes;
use proc_macro2::{Literal, TokenStream};
use quote::{ToTokens, TokenStreamExt};
use syn::{
    parse::{Parse, ParseStream, Result},
    {LitInt, LitStr, Token},
};

pub struct EosioSymbol(u64);

impl Parse for EosioSymbol {
    fn parse(input: ParseStream) -> Result<Self> {
        let precision = input.parse::<LitInt>()?.base10_parse::<u8>()?;
        input.parse::<Token![,]>()?;
        let code = input.parse::<LitStr>()?.value();
        symbol_from_bytes(precision, code.bytes())
            .map(Self)
            .map_err(|e| input.error(e))
    }
}

impl ToTokens for EosioSymbol {
    fn to_tokens(&self, tokens: &mut TokenStream) {
        tokens.append(Literal::u64_suffixed(self.0))
    }
}
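// A unit-test sketch (not in the source) exercising the `Parse` impl above
// directly; it assumes `syn`'s parsing entry points and `quote::ToTokens`
// are available in the test context, as they are for the crate itself.
#[cfg(test)]
mod tests {
    use super::EosioSymbol;
    use quote::ToTokens;

    #[test]
    fn parses_precision_and_code() {
        // Mirrors the `precision, code` grammar accepted by `parse` above.
        let sym: EosioSymbol = syn::parse_str(r#"4, "EOS""#).expect("valid symbol");
        // `to_tokens` appends a single suffixed u64 literal.
        assert!(!sym.to_token_stream().is_empty());
    }
}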
27.518519
71
0.627187
482a33203bcbda3cb9c3cbdf906ba476e76f0779
1,847
use crate::{
    Mut,
    Error,
    Context,
};

#[derive(Clone, Copy, PartialEq, Debug)]
pub enum ValType {
    I32,
    I64,
    F32,
    F64,
}

pub type ResultType = Vec<ValType>;
pub type FuncType = (ResultType, ResultType);

#[derive(Clone, Default, PartialEq)]
pub struct Limits {
    pub min: u32,
    pub max: Option<u32>,
}

impl Limits {
    pub fn validate(&self, _context: &Context, value: usize) -> Result<usize, Error> {
        // A declared maximum below the minimum can never be satisfied.
        if let Some(max) = self.max {
            if max < self.min {
                return Err(Error::Invalid);
            }
        }
        Ok(value)
    }
}

#[derive(Clone, PartialEq)]
pub struct MemType(pub Limits);

impl MemType {
    pub fn validate(&self, context: &Context) -> Result<(), Error> {
        let _ = self.0.validate(context, u16::MAX as usize)?;
        Ok(())
    }
}

#[derive(Clone, PartialEq)]
pub struct TableType(pub Limits, pub ElemType);

impl TableType {
    pub fn is_funcref(&self) -> bool {
        true
    }

    pub fn validate(&self, context: &Context) -> Result<(), Error> {
        let _ = self.0.validate(context, u32::MAX as usize)?;
        Ok(())
    }
}

#[derive(Clone, PartialEq)]
pub enum ElemType {
    FuncRef,
}

#[derive(Clone, PartialEq)]
pub struct GlobalType(pub ValType, pub Mut);

impl GlobalType {
    pub fn is_var(&self) -> bool {
        self.1 == Mut::Var
    }
}

pub enum ExternType {
    Func(FuncType),
    Table(TableType),
    Mem(MemType),
    Global(GlobalType),
}

impl ExternType {
    pub fn validate(&self, context: &Context) -> Result<(), Error> {
        // Function and global types carry no limits, so only tables and
        // memories need checking here.
        match &self {
            ExternType::Func(_) => {},
            ExternType::Table(tabletype) => {
                tabletype.validate(context)?;
            },
            ExternType::Mem(memtype) => {
                memtype.validate(context)?;
            },
            ExternType::Global(_) => {},
        }
        Ok(())
    }
}
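// An illustrative check of the contract encoded above (hypothetical helper:
// constructing a `Context` is out of scope here, so one is taken by
// reference). A declared `max` below `min` is invalid; everything else
// passes through unchanged.
fn _limits_sanity(context: &Context) -> Result<(), Error> {
    let bad = Limits { min: 2, max: Some(1) };
    assert!(bad.validate(context, 0).is_err()); // max < min is rejected

    let mem = MemType(Limits { min: 1, max: Some(16) });
    mem.validate(context) // delegates to Limits::validate
}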
21.988095
86
0.567407
164862f8c42d6bf7bf472b6f62e39fb0388401ea
572
// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

mod device_type;
mod errno;
mod file_mode;
mod open_flags;
mod resource_limits;
mod signals;
mod time;
mod user_address;
mod user_buffer;

pub mod as_any;
pub mod range_ext;
pub mod uapi;

pub use device_type::*;
pub use errno::*;
pub use file_mode::*;
pub use open_flags::*;
pub use resource_limits::*;
pub use signals::*;
pub use time::*;
pub use uapi::*;
pub use user_address::*;
pub use user_buffer::*;
19.724138
73
0.734266
d6b8cc7dc7007366b9f5f9037a02f7461cc796b6
3,313
use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::io::{Read, Write}; use std::path::{Path, PathBuf}; use std::str::FromStr; use serde_derive::Deserialize; use crate::project::VutError; use crate::util; mod custom_file_updater; mod custom_source_type; mod glob; mod regex; mod template_processor; mod templates; mod update_files; mod update_version_sources; pub use self::custom_file_updater::*; pub use self::custom_source_type::*; pub use self::glob::*; pub use self::regex::*; pub use self::template_processor::*; pub use self::templates::*; pub use self::update_files::*; pub use self::update_version_sources::*; pub const VUT_CONFIG_DEFAULT: &str = include_str!("default_config.toml"); pub const VUT_CONFIG_EXAMPLE: &str = include_str!("example_config.toml"); /// One or more version source types #[derive(Clone, Debug, Deserialize)] #[serde(rename_all = "kebab-case")] pub struct VersionSourceTypes(pub HashSet<String>); #[derive(Debug, Default, Deserialize)] #[serde(default)] #[serde(rename_all = "kebab-case")] pub struct General { pub ignore: Option<Globs>, } #[derive(Debug, Default, Deserialize)] #[serde(rename_all = "kebab-case")] pub struct AuthoritativeVersionSource { #[serde(rename = "type")] pub _type: Option<String>, pub path: Option<PathBuf>, } #[derive(Debug, Default, Deserialize)] #[serde(default)] #[serde(rename_all = "kebab-case")] pub struct VutConfig { pub general: General, pub authoritative_version_source: AuthoritativeVersionSource, pub file_updaters: HashMap<String, CustomFileUpdaterTypeDef>, pub version_source_types: HashMap<String, CustomSourceTypeDef>, pub update_files: Vec<UpdateFilesDef>, pub update_version_sources: Vec<UpdateVersionSourcesDef>, pub templates: Vec<TemplatesDef>, } impl VutConfig { pub fn from_file(path: &Path) -> Result<Self, VutError> { let mut file = util::open_file(path).map_err(VutError::OpenConfig)?; let mut toml_str = String::new(); file.read_to_string(&mut toml_str).map_err(VutError::ReadConfig)?; Self::from_str(&toml_str) } pub fn legacy() -> Self { Self { general: General { ignore: Some(Globs::Single("**/.git".to_owned())), }, authoritative_version_source: Default::default(), file_updaters: HashMap::new(), version_source_types: HashMap::new(), update_files: Vec::new(), update_version_sources: Vec::new(), templates: vec![TemplatesDef { globs: Globs::Single("**/*.vutemplate".to_owned()), start_path: None, output_path: None, processor: None, encoding: None, }], } } } impl FromStr for VutConfig { type Err = VutError; fn from_str(s: &str) -> Result<Self, Self::Err> { let config: VutConfig = toml::from_str(s).map_err(|err| VutError::ParseConfig(Cow::Owned(err.to_string())))?; Ok(config) } } pub fn create_config_file(path: &Path, text: &str) -> Result<VutConfig, VutError> { util::create_file(&path) .map_err(VutError::OpenConfig)? .write(text.as_bytes()) .map_err(VutError::WriteConfig)?; VutConfig::from_str(text) }
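// A parsing sketch for the configuration types above (hypothetical TOML;
// key names follow the kebab-case renames declared on the structs, and it
// assumes `Globs` deserializes from a single string, as the `Globs::Single`
// variant used by `legacy()` suggests).
fn _config_sketch() {
    use std::str::FromStr;

    let toml = r#"
[general]
ignore = "**/.git"

[authoritative-version-source]
type = "vut"
path = "VERSION"
"#;

    let config = VutConfig::from_str(toml).expect("config should parse");
    assert!(config.general.ignore.is_some());
    assert_eq!(config.authoritative_version_source._type.as_deref(), Some("vut"));
}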
28.808696
117
0.658316
f7f21e598f5d6ecda130e9427c925cd3371a33db
5,159
use std::{ error::Error as StdError, fmt::{self, Display}, str::FromStr, }; use mime::Mime; use super::{Header, HeaderName}; use crate::BoxError; /// `Content-Type` of the body /// /// This struct can represent any valid [mime type], which can be parsed via /// [`ContentType::parse`]. Constants are provided for the most-used mime-types. /// /// Defined in [RFC2045](https://tools.ietf.org/html/rfc2045#section-5) /// /// [mime type]: https://www.iana.org/assignments/media-types/media-types.xhtml #[derive(Debug, Clone, PartialEq, Eq)] pub struct ContentType(Mime); impl ContentType { /// A `ContentType` of type `text/plain; charset=utf-8` /// /// Indicates that the body is in utf-8 encoded plain text. pub const TEXT_PLAIN: ContentType = Self::from_mime(mime::TEXT_PLAIN_UTF_8); /// A `ContentType` of type `text/html; charset=utf-8` /// /// Indicates that the body is in utf-8 encoded html. pub const TEXT_HTML: ContentType = Self::from_mime(mime::TEXT_HTML_UTF_8); /// Parse `s` into `ContentType` pub fn parse(s: &str) -> Result<ContentType, ContentTypeErr> { Ok(Self::from_mime(s.parse().map_err(ContentTypeErr)?)) } pub(crate) const fn from_mime(mime: Mime) -> Self { Self(mime) } pub(crate) fn as_ref(&self) -> &Mime { &self.0 } } impl Header for ContentType { fn name() -> HeaderName { HeaderName::new_from_ascii_str("Content-Type") } fn parse(s: &str) -> Result<Self, BoxError> { Ok(Self(s.parse()?)) } fn display(&self) -> String { self.0.to_string() } } impl FromStr for ContentType { type Err = ContentTypeErr; fn from_str(s: &str) -> Result<Self, Self::Err> { Self::parse(s) } } #[cfg(feature = "mime03")] #[cfg_attr(docsrs, doc(cfg(feature = "mime03")))] impl From<Mime> for ContentType { fn from(mime: Mime) -> Self { Self::from_mime(mime) } } /// An error occurred while trying to [`ContentType::parse`]. 
#[derive(Debug)] pub struct ContentTypeErr(mime::FromStrError); impl StdError for ContentTypeErr { fn source(&self) -> Option<&(dyn StdError + 'static)> { Some(&self.0) } } impl Display for ContentTypeErr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { Display::fmt(&self.0, f) } } // -- Serialization and Deserialization -- #[cfg(feature = "serde")] mod serde { use serde::de::{self, Deserialize, Deserializer, Visitor}; use serde::ser::{Serialize, Serializer}; use std::fmt; use super::ContentType; impl Serialize for ContentType { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { serializer.serialize_newtype_struct("ContentType", &format!("{}", &self.0)) } } impl<'de> Deserialize<'de> for ContentType { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { struct ContentTypeVisitor; impl<'de> Visitor<'de> for ContentTypeVisitor { type Value = ContentType; // The error message which states what the Visitor expects to // receive fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a ContentType string like `text/plain`") } fn visit_str<E>(self, mime: &str) -> Result<ContentType, E> where E: de::Error, { match ContentType::parse(mime) { Ok(content_type) => Ok(content_type), Err(_) => Err(E::custom(format!( "Couldn't parse the following MIME-Type: {}", mime ))), } } } deserializer.deserialize_str(ContentTypeVisitor) } } } #[cfg(test)] mod test { use super::ContentType; use crate::message::header::{HeaderName, Headers}; #[test] fn format_content_type() { let mut headers = Headers::new(); headers.set(ContentType::TEXT_PLAIN); assert_eq!( headers.to_string(), "Content-Type: text/plain; charset=utf-8\r\n" ); headers.set(ContentType::TEXT_HTML); assert_eq!( headers.to_string(), "Content-Type: text/html; charset=utf-8\r\n" ); } #[test] fn parse_content_type() { let mut headers = Headers::new(); headers.insert_raw( HeaderName::new_from_ascii_str("Content-Type"), "text/plain; charset=utf-8".to_string(), ); assert_eq!(headers.get::<ContentType>(), Some(ContentType::TEXT_PLAIN)); headers.insert_raw( HeaderName::new_from_ascii_str("Content-Type"), "text/html; charset=utf-8".to_string(), ); assert_eq!(headers.get::<ContentType>(), Some(ContentType::TEXT_HTML)); } }
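// A usage sketch for the header above: `parse` and the `FromStr` impl are
// equivalent entry points, and the constants compare equal to their parsed
// forms because the underlying `Mime` values match parameter-for-parameter.
use std::str::FromStr;

fn main() {
    let ct = ContentType::parse("text/plain; charset=utf-8").unwrap();
    assert_eq!(ct, ContentType::TEXT_PLAIN);

    let same = ContentType::from_str("text/plain; charset=utf-8").unwrap();
    assert_eq!(ct, same);
}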
27.010471
88
0.567164
bfeedfb63d7d82e147927afeff2f0b49061dded2
17,930
//! Types which represent a SQL data type. //! //! The structs in this module are *only* used as markers to represent a SQL type. //! They should never be used in your structs. //! If you'd like to know the rust types which can be used for a given SQL type, //! see the documentation for that SQL type. //! Additional types may be provided by other crates. //! //! To see which SQL type can be used with a given Rust type, //! see the "Implementors" section of [`FromSql`]. //! //! [`FromSql`]: super::deserialize::FromSql //! //! Any backend specific types are re-exported through this module mod fold; pub mod ops; mod ord; pub use self::fold::Foldable; pub use self::ord::SqlOrd; use crate::expression::TypedExpressionType; use crate::query_builder::QueryId; /// The boolean SQL type. /// /// On backends without a native boolean type, /// this is emulated with the smallest supported integer. /// /// ### [`ToSql`](crate::serialize::ToSql) impls /// /// - [`bool`][bool] /// /// ### [`FromSql`](crate::deserialize::FromSql) impls /// /// - [`bool`][bool] /// /// [bool]: https://doc.rust-lang.org/nightly/std/primitive.bool.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "16", array_oid = "1000")] #[sqlite_type = "Integer"] #[mysql_type = "Tiny"] pub struct Bool; /// The tiny integer SQL type. /// /// This is only available on MySQL. /// Keep in mind that `diesel print-schema` will see `TINYINT(1)` as `Bool`, /// not `TinyInt`. /// /// ### [`ToSql`](crate::serialize::ToSql) impls /// /// - [`i8`][i8] /// /// ### [`FromSql`](crate::deserialize::FromSql) impls /// /// - [`i8`][i8] /// /// [i8]: https://doc.rust-lang.org/nightly/std/primitive.i8.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[mysql_type = "Tiny"] pub struct TinyInt; #[doc(hidden)] pub type Tinyint = TinyInt; /// The small integer SQL type. /// /// ### [`ToSql`](crate::serialize::ToSql) impls /// /// - [`i16`][i16] /// /// ### [`FromSql`](crate::deserialize::FromSql) impls /// /// - [`i16`][i16] /// /// [i16]: https://doc.rust-lang.org/nightly/std/primitive.i16.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "21", array_oid = "1005")] #[sqlite_type = "SmallInt"] #[mysql_type = "Short"] pub struct SmallInt; #[doc(hidden)] pub type Int2 = SmallInt; #[doc(hidden)] pub type Smallint = SmallInt; /// The integer SQL type. /// /// ### [`ToSql`](crate::serialize::ToSql) impls /// /// - [`i32`][i32] /// /// ### [`FromSql`](crate::deserialize::FromSql) impls /// /// - [`i32`][i32] /// /// [i32]: https://doc.rust-lang.org/nightly/std/primitive.i32.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "23", array_oid = "1007")] #[sqlite_type = "Integer"] #[mysql_type = "Long"] pub struct Integer; #[doc(hidden)] pub type Int4 = Integer; /// The big integer SQL type. /// /// ### [`ToSql`](crate::serialize::ToSql) impls /// /// - [`i64`][i64] /// /// ### [`FromSql`](crate::deserialize::FromSql) impls /// /// - [`i64`][i64] /// /// [i64]: https://doc.rust-lang.org/nightly/std/primitive.i64.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "20", array_oid = "1016")] #[sqlite_type = "Long"] #[mysql_type = "LongLong"] pub struct BigInt; #[doc(hidden)] pub type Int8 = BigInt; #[doc(hidden)] pub type Bigint = BigInt; /// The float SQL type. 
/// /// ### [`ToSql`](crate::serialize::ToSql) impls /// /// - [`f32`][f32] /// /// ### [`FromSql`](crate::deserialize::FromSql) impls /// /// - [`f32`][f32] /// /// [f32]: https://doc.rust-lang.org/nightly/std/primitive.f32.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "700", array_oid = "1021")] #[sqlite_type = "Float"] #[mysql_type = "Float"] pub struct Float; #[doc(hidden)] pub type Float4 = Float; /// The double precision float SQL type. /// /// ### [`ToSql`](crate::serialize::ToSql) impls /// /// - [`f64`][f64] /// /// ### [`FromSql`](crate::deserialize::FromSql) impls /// /// - [`f64`][f64] /// /// [f64]: https://doc.rust-lang.org/nightly/std/primitive.f64.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "701", array_oid = "1022")] #[sqlite_type = "Double"] #[mysql_type = "Double"] pub struct Double; #[doc(hidden)] pub type Float8 = Double; /// The arbitrary precision numeric SQL type. /// /// This type is only supported on PostgreSQL and MySQL. /// On SQLite, [`Double`] should be used instead. /// /// ### [`ToSql`](crate::serialize::ToSql) impls /// /// - [`bigdecimal::BigDecimal`] with `feature = ["numeric"]` /// /// ### [`FromSql`](crate::deserialize::FromSql) impls /// /// - [`bigdecimal::BigDecimal`] with `feature = ["numeric"]` /// /// [`bigdecimal::BigDecimal`]: /bigdecimal/struct.BigDecimal.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "1700", array_oid = "1231")] #[mysql_type = "Numeric"] #[sqlite_type = "Double"] pub struct Numeric; /// Alias for `Numeric` pub type Decimal = Numeric; /// The text SQL type. /// /// On all backends strings must be valid UTF-8. /// On PostgreSQL strings must not include nul bytes. /// /// Schema inference will treat all variants of `TEXT` as this type (e.g. /// `VARCHAR`, `MEDIUMTEXT`, etc). /// /// ### [`ToSql`](crate::serialize::ToSql) impls /// /// - [`String`][String] /// - [`&str`][str] /// /// ### [`FromSql`](crate::deserialize::FromSql) impls /// /// - [`String`][String] /// /// [String]: std::string::String /// [str]: https://doc.rust-lang.org/nightly/std/primitive.str.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "25", array_oid = "1009")] #[sqlite_type = "Text"] #[mysql_type = "String"] pub struct Text; /// The SQL `VARCHAR` type /// /// This type is generally interchangeable with `TEXT`, so Diesel has this as an /// alias rather than a separate type (Diesel does not currently support /// implicit coercions). /// /// One notable exception to this is with arrays on PG. `TEXT[]` cannot be /// coerced to `VARCHAR[]`. It is recommended that you always use `TEXT[]` if /// you need a string array on PG. pub type VarChar = Text; #[doc(hidden)] pub type Varchar = VarChar; #[doc(hidden)] pub type Char = Text; #[doc(hidden)] pub type Tinytext = Text; #[doc(hidden)] pub type Mediumtext = Text; #[doc(hidden)] pub type Longtext = Text; /// The binary SQL type. /// /// Schema inference will treat all variants of `BLOB` as this type (e.g. /// `VARBINARY`, `MEDIUMBLOB`, etc). 
/// /// ### [`ToSql`](crate::serialize::ToSql) impls /// /// - [`Vec<u8>`][Vec] /// - [`&[u8]`][slice] /// /// ### [`FromSql`](crate::deserialize::FromSql) impls /// /// - [`Vec<u8>`][Vec] /// /// [Vec]: std::vec::Vec /// [slice]: https://doc.rust-lang.org/nightly/std/primitive.slice.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "17", array_oid = "1001")] #[sqlite_type = "Binary"] #[mysql_type = "Blob"] pub struct Binary; #[doc(hidden)] pub type Tinyblob = Binary; #[doc(hidden)] pub type Blob = Binary; #[doc(hidden)] pub type Mediumblob = Binary; #[doc(hidden)] pub type Longblob = Binary; #[doc(hidden)] pub type Varbinary = Binary; #[doc(hidden)] pub type Bit = Binary; /// The date SQL type. /// /// ### [`ToSql`](crate::serialize::ToSql) impls /// /// - [`chrono::NaiveDate`][NaiveDate] with `feature = "chrono"` /// /// ### [`FromSql`](crate::deserialize::FromSql) impls /// /// - [`chrono::NaiveDate`][NaiveDate] with `feature = "chrono"` /// /// [NaiveDate]: https://docs.rs/chrono/*/chrono/naive/struct.NaiveDate.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "1082", array_oid = "1182")] #[sqlite_type = "Text"] #[mysql_type = "Date"] pub struct Date; /// The interval SQL type. /// /// This type is currently only implemented for PostgreSQL. /// /// ### [`ToSql`](crate::serialize::ToSql) impls /// /// - [`PgInterval`] which can be constructed using [`IntervalDsl`] /// /// ### [`FromSql`](crate::deserialize::FromSql) impls /// /// - [`PgInterval`] which can be constructed using [`IntervalDsl`] /// /// [`PgInterval`]: ../pg/data_types/struct.PgInterval.html /// [`IntervalDsl`]: ../pg/expression/extensions/trait.IntervalDsl.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "1186", array_oid = "1187")] pub struct Interval; /// The time SQL type. /// /// ### [`ToSql`](crate::serialize::ToSql) impls /// /// - [`chrono::NaiveTime`][NaiveTime] with `feature = "chrono"` /// /// ### [`FromSql`](crate::deserialize::FromSql) impls /// /// - [`chrono::NaiveTime`][NaiveTime] with `feature = "chrono"` /// /// [NaiveTime]: /chrono/naive/time/struct.NaiveTime.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "1083", array_oid = "1183")] #[sqlite_type = "Text"] #[mysql_type = "Time"] pub struct Time; /// The timestamp SQL type. /// /// ### [`ToSql`](crate::serialize::ToSql) impls /// /// - [`std::time::SystemTime`][SystemTime] (PG only) /// - [`chrono::NaiveDateTime`][NaiveDateTime] with `feature = "chrono"` /// - [`time::Timespec`][Timespec] with `feature = "deprecated-time"` (PG only) /// /// ### [`FromSql`](crate::deserialize::FromSql) impls /// /// - [`std::time::SystemTime`][SystemTime] (PG only) /// - [`chrono::NaiveDateTime`][NaiveDateTime] with `feature = "chrono"` /// - [`time::Timespec`][Timespec] with `feature = "deprecated-time"` (PG only) /// /// [SystemTime]: std::time::SystemTime #[cfg_attr( feature = "chrono", doc = " [NaiveDateTime]: chrono::naive::NaiveDateTime" )] #[cfg_attr( not(feature = "chrono"), doc = " [NaiveDateTime]: https://docs.rs/chrono/*/chrono/naive/struct.NaiveDateTime.html" )] /// [Timespec]: /time/struct.Timespec.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "1114", array_oid = "1115")] #[sqlite_type = "Text"] #[mysql_type = "Timestamp"] pub struct Timestamp; /// The JSON SQL type. 
This type can only be used with `feature = /// "serde_json"` /// /// For postgresql you should normally prefer [`Jsonb`](struct.Jsonb.html) instead, /// for the reasons discussed there. /// /// ### [`ToSql`] impls /// /// - [`serde_json::Value`] /// /// ### [`FromSql`] impls /// /// - [`serde_json::Value`] /// /// [`ToSql`]: /serialize/trait.ToSql.html /// [`FromSql`]: /deserialize/trait.FromSql.html /// [`serde_json::Value`]: /../serde_json/value/enum.Value.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "114", array_oid = "199")] #[mysql_type = "String"] pub struct Json; /// The nullable SQL type. /// /// This wraps another SQL type to indicate that it can be null. /// By default all values are assumed to be `NOT NULL`. /// /// ### [`ToSql`](crate::serialize::ToSql) impls /// /// - Any `T` which implements `ToSql<ST>` /// - `Option<T>` for any `T` which implements `ToSql<ST>` /// /// ### [`FromSql`](crate::deserialize::FromSql) impls /// /// - `Option<T>` for any `T` which implements `FromSql<ST>` #[derive(Debug, Clone, Copy, Default)] pub struct Nullable<ST>(ST); impl<ST> SqlType for Nullable<ST> where ST: SqlType, { type IsNull = is_nullable::IsNullable; } #[cfg(feature = "postgres_backend")] pub use crate::pg::types::sql_types::*; #[cfg(feature = "mysql_backend")] pub use crate::mysql::types::*; /// Indicates that a SQL type exists for a backend. /// /// This trait can be derived using the [`SqlType` derive](derive@SqlType) /// /// # Example /// /// ```rust /// #[derive(diesel::sql_types::SqlType)] /// #[postgres(oid = "23", array_oid = "1007")] /// #[sqlite_type = "Integer"] /// #[mysql_type = "Long"] /// pub struct Integer; /// ``` pub trait HasSqlType<ST>: TypeMetadata { /// Fetch the metadata for the given type /// /// This method may use `lookup` to do dynamic runtime lookup. Implementors /// of this method should not do dynamic lookup unless absolutely necessary fn metadata(lookup: &mut Self::MetadataLookup) -> Self::TypeMetadata; } /// Information about how a backend stores metadata about given SQL types pub trait TypeMetadata { /// The actual type used to represent metadata. /// /// On PostgreSQL, this is the type's OID. /// On MySQL and SQLite, this is an enum representing all storage classes /// they support. type TypeMetadata; /// The type used for runtime lookup of metadata. /// /// For most backends, which don't support user defined types, this will /// be `()`. type MetadataLookup: ?Sized; } /// Converts a type which may or may not be nullable into its nullable /// representation. pub trait IntoNullable { /// The nullable representation of this type. /// /// For all types except `Nullable`, this will be `Nullable<Self>`. type Nullable; } impl<T> IntoNullable for T where T: SqlType<IsNull = is_nullable::NotNull> + SingleValue, { type Nullable = Nullable<T>; } impl<T> IntoNullable for Nullable<T> where T: SqlType, { type Nullable = Self; } /// Converts a type which may or may not be nullable into its not nullable /// representation. pub trait IntoNotNullable { /// The not nullable representation of this type. /// /// For `Nullable<T>`, this will be `T` otherwise the type itself type NotNullable; } impl<T> IntoNotNullable for T where T: SqlType<IsNull = is_nullable::NotNull>, { type NotNullable = T; } impl<T> IntoNotNullable for Nullable<T> where T: SqlType, { type NotNullable = T; } /// A marker trait indicating that a SQL type represents a single value, as /// opposed to a list of values. 
/// /// This trait should generally be implemented for all SQL types with the /// exception of Rust tuples. If a column could have this as its type, this /// trait should be implemented. /// /// # Deriving /// /// This trait is automatically implemented by [`#[derive(SqlType)]`](derive@SqlType) /// pub trait SingleValue: SqlType {} impl<T: SqlType + SingleValue> SingleValue for Nullable<T> {} #[doc(inline)] pub use diesel_derives::DieselNumericOps; #[doc(inline)] pub use diesel_derives::SqlType; /// A marker trait for SQL types /// /// # Deriving /// /// This trait is automatically implemented by [`#[derive(SqlType)]`](derive@SqlType) /// which sets `IsNull` to [`is_nullable::NotNull`] /// pub trait SqlType: 'static { /// Is this type nullable? /// /// This type should always be one of the structs in the ['is_nullable`] /// module. See the documentation of those structs for more details. /// /// ['is_nullable`]: is_nullable type IsNull: OneIsNullable<is_nullable::IsNullable> + OneIsNullable<is_nullable::NotNull>; } /// Is one value of `IsNull` nullable? /// /// You should never implement this trait. pub trait OneIsNullable<Other> { /// See the trait documentation type Out: OneIsNullable<is_nullable::IsNullable> + OneIsNullable<is_nullable::NotNull>; } /// Are both values of `IsNull` are nullable? pub trait AllAreNullable<Other> { /// See the trait documentation type Out: AllAreNullable<is_nullable::NotNull> + AllAreNullable<is_nullable::IsNullable>; } /// A type level constructor for maybe nullable types /// /// Constructs either `Nullable<O>` (for `Self` == `is_nullable::IsNullable`) /// or `O` (for `Self` == `is_nullable::NotNull`) pub trait MaybeNullableType<O> { /// See the trait documentation type Out: SqlType + TypedExpressionType; } /// Possible values for `SqlType::IsNullable` pub mod is_nullable { use super::*; /// No, this type cannot be null as it is marked as `NOT NULL` at database level /// /// This should be choosen for basically all manual impls of `SqlType` /// beside implementing your own `Nullable<>` wrapper type #[derive(Debug, Clone, Copy)] pub struct NotNull; /// Yes, this type can be null /// /// The only diesel provided `SqlType` that uses this value is [`Nullable<T>`] /// /// [`Nullable<T>`]: Nullable #[derive(Debug, Clone, Copy)] pub struct IsNullable; impl OneIsNullable<NotNull> for NotNull { type Out = NotNull; } impl OneIsNullable<IsNullable> for NotNull { type Out = IsNullable; } impl OneIsNullable<NotNull> for IsNullable { type Out = IsNullable; } impl OneIsNullable<IsNullable> for IsNullable { type Out = IsNullable; } impl AllAreNullable<NotNull> for NotNull { type Out = NotNull; } impl AllAreNullable<IsNullable> for NotNull { type Out = NotNull; } impl AllAreNullable<NotNull> for IsNullable { type Out = NotNull; } impl AllAreNullable<IsNullable> for IsNullable { type Out = IsNullable; } impl<O> MaybeNullableType<O> for NotNull where O: SqlType + TypedExpressionType, { type Out = O; } impl<O> MaybeNullableType<O> for IsNullable where O: SqlType, Nullable<O>: TypedExpressionType, { type Out = Nullable<O>; } /// Represents the output type of [`MaybeNullableType`] pub type MaybeNullable<N, T> = <N as MaybeNullableType<T>>::Out; /// Represents the output type of [`OneIsNullable`] /// for two given SQL types pub type IsOneNullable<S1, S2> = <IsSqlTypeNullable<S1> as OneIsNullable<IsSqlTypeNullable<S2>>>::Out; /// Represents the output type of [`AllAreNullable`] /// for two given SQL types pub type AreAllNullable<S1, S2> = <IsSqlTypeNullable<S1> as 
AllAreNullable<IsSqlTypeNullable<S2>>>::Out; /// Represents if the SQL type is nullable or not pub type IsSqlTypeNullable<T> = <T as SqlType>::IsNull; } /// A marker trait for accepting expressions of the type `Bool` and /// `Nullable<Bool>` in the same place pub trait BoolOrNullableBool {} impl BoolOrNullableBool for Bool {} impl BoolOrNullableBool for Nullable<Bool> {} #[doc(inline)] pub use crate::expression::expression_types::Untyped;
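// A compile-time sketch (hypothetical function using only items from this
// module): if it compiles, `IntoNullable` adds exactly one `Nullable<_>`
// wrapper, is idempotent on already-nullable types, and `IntoNotNullable`
// strips the wrapper again.
fn _nullable_roundtrip() {
    use std::marker::PhantomData;

    let _: PhantomData<<Integer as IntoNullable>::Nullable> =
        PhantomData::<Nullable<Integer>>;
    let _: PhantomData<<Nullable<Integer> as IntoNullable>::Nullable> =
        PhantomData::<Nullable<Integer>>;
    let _: PhantomData<<Nullable<Integer> as IntoNotNullable>::NotNullable> =
        PhantomData::<Integer>;
}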
28.147567
94
0.648076
723b57ed970ad6bb16bac57787f39029e9ea4f4f
10,701
//! Contains utilities for generating suggestions for borrowck errors related to unsatisfied //! outlives constraints. use rustc_data_structures::fx::FxHashSet; use rustc_errors::DiagnosticBuilder; use rustc_middle::ty::RegionVid; use smallvec::SmallVec; use std::collections::BTreeMap; use tracing::debug; use crate::MirBorrowckCtxt; use super::{ErrorConstraintInfo, RegionName, RegionNameSource}; /// The different things we could suggest. enum SuggestedConstraint { /// Outlives(a, [b, c, d, ...]) => 'a: 'b + 'c + 'd + ... Outlives(RegionName, SmallVec<[RegionName; 2]>), /// 'a = 'b Equal(RegionName, RegionName), /// 'a: 'static i.e. 'a = 'static and the user should just use 'static Static(RegionName), } /// Collects information about outlives constraints that needed to be added for a given MIR node /// corresponding to a function definition. /// /// Adds a help note suggesting adding a where clause with the needed constraints. #[derive(Default)] pub struct OutlivesSuggestionBuilder { /// The list of outlives constraints that need to be added. Specifically, we map each free /// region to all other regions that it must outlive. I will use the shorthand `fr: /// outlived_frs`. Not all of these regions will already have names necessarily. Some could be /// implicit free regions that we inferred. These will need to be given names in the final /// suggestion message. constraints_to_add: BTreeMap<RegionVid, Vec<RegionVid>>, } impl OutlivesSuggestionBuilder { /// Returns `true` iff the `RegionNameSource` is a valid source for an outlives /// suggestion. // // FIXME: Currently, we only report suggestions if the `RegionNameSource` is an early-bound // region or a named region, avoiding using regions with synthetic names altogether. This // allows us to avoid giving impossible suggestions (e.g. adding bounds to closure args). // We can probably be less conservative, since some inferred free regions are namable (e.g. // the user can explicitly name them. To do this, we would allow some regions whose names // come from `MatchedAdtAndSegment`, being careful to filter out bad suggestions, such as // naming the `'self` lifetime in methods, etc. fn region_name_is_suggestable(name: &RegionName) -> bool { match name.source { RegionNameSource::NamedEarlyBoundRegion(..) | RegionNameSource::NamedFreeRegion(..) | RegionNameSource::Static => true, // Don't give suggestions for upvars, closure return types, or other unnamable // regions. RegionNameSource::SynthesizedFreeEnvRegion(..) | RegionNameSource::AnonRegionFromArgument(..) | RegionNameSource::AnonRegionFromUpvar(..) | RegionNameSource::AnonRegionFromOutput(..) | RegionNameSource::AnonRegionFromYieldTy(..) | RegionNameSource::AnonRegionFromAsyncFn(..) => { debug!("Region {:?} is NOT suggestable", name); false } } } /// Returns a name for the region if it is suggestable. See `region_name_is_suggestable`. fn region_vid_to_name( &self, mbcx: &MirBorrowckCtxt<'_, '_>, region: RegionVid, ) -> Option<RegionName> { mbcx.give_region_a_name(region).filter(Self::region_name_is_suggestable) } /// Compiles a list of all suggestions to be printed in the final big suggestion. fn compile_all_suggestions( &self, mbcx: &MirBorrowckCtxt<'_, '_>, ) -> SmallVec<[SuggestedConstraint; 2]> { let mut suggested = SmallVec::new(); // Keep track of variables that we have already suggested unifying so that we don't print // out silly duplicate messages. 
let mut unified_already = FxHashSet::default(); for (fr, outlived) in &self.constraints_to_add { let Some(fr_name) = self.region_vid_to_name(mbcx, *fr) else { continue; }; let outlived = outlived .iter() // if there is a `None`, we will just omit that constraint .filter_map(|fr| self.region_vid_to_name(mbcx, *fr).map(|rname| (fr, rname))) .collect::<Vec<_>>(); // No suggestable outlived lifetimes. if outlived.is_empty() { continue; } // There are three types of suggestions we can make: // 1) Suggest a bound: 'a: 'b // 2) Suggest replacing 'a with 'static. If any of `outlived` is `'static`, then we // should just replace 'a with 'static. // 3) Suggest unifying 'a with 'b if we have both 'a: 'b and 'b: 'a if outlived .iter() .any(|(_, outlived_name)| matches!(outlived_name.source, RegionNameSource::Static)) { suggested.push(SuggestedConstraint::Static(fr_name)); } else { // We want to isolate out all lifetimes that should be unified and print out // separate messages for them. let (unified, other): (Vec<_>, Vec<_>) = outlived.into_iter().partition( // Do we have both 'fr: 'r and 'r: 'fr? |(r, _)| { self.constraints_to_add .get(r) .map(|r_outlived| r_outlived.as_slice().contains(fr)) .unwrap_or(false) }, ); for (r, bound) in unified.into_iter() { if !unified_already.contains(fr) { suggested.push(SuggestedConstraint::Equal(fr_name.clone(), bound)); unified_already.insert(r); } } if !other.is_empty() { let other = other.iter().map(|(_, rname)| rname.clone()).collect::<SmallVec<_>>(); suggested.push(SuggestedConstraint::Outlives(fr_name, other)) } } } suggested } /// Add the outlives constraint `fr: outlived_fr` to the set of constraints we need to suggest. crate fn collect_constraint(&mut self, fr: RegionVid, outlived_fr: RegionVid) { debug!("Collected {:?}: {:?}", fr, outlived_fr); // Add to set of constraints for final help note. self.constraints_to_add.entry(fr).or_default().push(outlived_fr); } /// Emit an intermediate note on the given `Diagnostic` if the involved regions are /// suggestable. crate fn intermediate_suggestion( &mut self, mbcx: &MirBorrowckCtxt<'_, '_>, errci: &ErrorConstraintInfo, diag: &mut DiagnosticBuilder<'_>, ) { // Emit an intermediate note. let fr_name = self.region_vid_to_name(mbcx, errci.fr); let outlived_fr_name = self.region_vid_to_name(mbcx, errci.outlived_fr); if let (Some(fr_name), Some(outlived_fr_name)) = (fr_name, outlived_fr_name) { if !matches!(outlived_fr_name.source, RegionNameSource::Static) { diag.help(&format!( "consider adding the following bound: `{}: {}`", fr_name, outlived_fr_name )); } } } /// If there is a suggestion to emit, add a diagnostic to the buffer. This is the final /// suggestion including all collected constraints. crate fn add_suggestion(&self, mbcx: &mut MirBorrowckCtxt<'_, '_>) { // No constraints to add? Done. if self.constraints_to_add.is_empty() { debug!("No constraints to suggest."); return; } // If there is only one constraint to suggest, then we already suggested it in the // intermediate suggestion above. if self.constraints_to_add.len() == 1 && self.constraints_to_add.values().next().unwrap().len() == 1 { debug!("Only 1 suggestion. Skipping."); return; } // Get all suggestable constraints. let suggested = self.compile_all_suggestions(mbcx); // If there are no suggestable constraints... if suggested.is_empty() { debug!("Only 1 suggestable constraint. Skipping."); return; } // If there is exactly one suggestable constraints, then just suggest it. Otherwise, emit a // list of diagnostics. 
let mut diag = if suggested.len() == 1 { mbcx.infcx.tcx.sess.diagnostic().struct_help(&match suggested.last().unwrap() { SuggestedConstraint::Outlives(a, bs) => { let bs: SmallVec<[String; 2]> = bs.iter().map(|r| format!("{}", r)).collect(); format!("add bound `{}: {}`", a, bs.join(" + ")) } SuggestedConstraint::Equal(a, b) => { format!("`{}` and `{}` must be the same: replace one with the other", a, b) } SuggestedConstraint::Static(a) => format!("replace `{}` with `'static`", a), }) } else { // Create a new diagnostic. let mut diag = mbcx .infcx .tcx .sess .diagnostic() .struct_help("the following changes may resolve your lifetime errors"); // Add suggestions. for constraint in suggested { match constraint { SuggestedConstraint::Outlives(a, bs) => { let bs: SmallVec<[String; 2]> = bs.iter().map(|r| format!("{}", r)).collect(); diag.help(&format!("add bound `{}: {}`", a, bs.join(" + "))); } SuggestedConstraint::Equal(a, b) => { diag.help(&format!( "`{}` and `{}` must be the same: replace one with the other", a, b )); } SuggestedConstraint::Static(a) => { diag.help(&format!("replace `{}` with `'static`", a)); } } } diag }; // We want this message to appear after other messages on the mir def. let mir_span = mbcx.body.span; diag.sort_span = mir_span.shrink_to_hi(); // Buffer the diagnostic diag.buffer(&mut mbcx.errors_buffer); } }
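// For orientation, an illustrative piece of user code (not from the source)
// that exercises this machinery: returning `y` requires `'b: 'a`, so
// borrowck emits the intermediate help note produced above, along the lines
// of: consider adding the following bound: `'b: 'a`.
fn longest<'a, 'b>(x: &'a str, y: &'b str) -> &'a str {
    if x.len() > y.len() { x } else { y }
}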
40.843511
99
0.563779
11df95ad24644e62587561f03d242fb83dc37255
362
// Copyright (c) SimpleStaking, Viable Systems and Tezedge Contributors
// SPDX-License-Identifier: MIT

use serde::{Deserialize, Serialize};
use tezos_api::ffi::CommitGenesisResult;

#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum StorageBlocksGenesisInitCommitResultPutState {
    Init { result: CommitGenesisResult },
    Error {},
    Success {},
}
27.846154
71
0.751381
0e0e080540147363a96a09eb1ec160b884b9a16d
2,611
use procfs::process::{FDTarget, Process};
use std::path::Path;
use std::{ffi::CString, os::unix::ffi::OsStrExt};

fn main() {
    let myself = Process::myself().unwrap();
    let mountinfo = myself.mountinfo().unwrap();

    for lock in procfs::locks().unwrap() {
        lock.pid
            .and_then(|pid| Process::new(pid).ok())
            .and_then(|proc| proc.cmdline().ok())
            .and_then(|mut cmd| cmd.drain(..).next())
            .map_or_else(
                || {
                    print!("{:18}", "(undefined)");
                },
                |s| {
                    let p = Path::new(&s);
                    print!(
                        "{:18}",
                        p.file_name()
                            .unwrap_or_else(|| p.as_os_str())
                            .to_string_lossy()
                    );
                },
            );
        print!("{:<12} ", lock.pid.unwrap_or(-1));
        print!("{:12} ", lock.lock_type.as_str());
        print!("{:12} ", lock.mode.as_str());
        print!("{:12} ", lock.kind.as_str());

        // try to find the path for this inode
        let mut found = false;
        if let Some(pid) = lock.pid {
            if let Ok(fds) = Process::new(pid).and_then(|p| p.fd()) {
                for fd in fds {
                    if let FDTarget::Path(p) = fd.target {
                        let cstr = CString::new(p.as_os_str().as_bytes()).unwrap();
                        let mut stat = unsafe { std::mem::zeroed() };
                        if unsafe { libc::stat(cstr.as_ptr(), &mut stat) } == 0
                            && stat.st_ino as u64 == lock.inode
                        {
                            print!("{}", p.display());
                            found = true;
                            break;
                        }
                    }
                }
            }
        }
        if !found {
            // we don't have a PID or we don't have permission to inspect the
            // process's files, but we still have the device and inode.
            // There's no way to look up a path from an inode, so just print
            // the device's mount point.
            for mount in &mountinfo {
                if format!("{}:{}", lock.devmaj, lock.devmin) == mount.majmin {
                    print!("{}...", mount.mount_point.display());
                    found = true;
                    break;
                }
            }
        }
        if !found {
            // still not found? print the device
            print!("{}:{}", lock.devmaj, lock.devmin);
        }
        println!();
    }
}
34.813333
133
0.406741
db827afdb94f4481096d600aa8db0cd5ec599e9d
29,962
use std::cell::Cell; use std::fmt::Write; use std::mem; use syntax::source_map::{self, Span, DUMMY_SP}; use rustc::hir::def_id::DefId; use rustc::hir::def::DefKind; use rustc::mir; use rustc::ty::layout::{ self, Size, Align, HasDataLayout, LayoutOf, TyLayout }; use rustc::ty::subst::{Subst, SubstsRef}; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; use rustc::ty::query::TyCtxtAt; use rustc_data_structures::indexed_vec::IndexVec; use rustc::mir::interpret::{ ErrorHandled, GlobalId, Scalar, FrameInfo, AllocId, EvalResult, InterpError, truncate, sign_extend, }; use rustc_data_structures::fx::FxHashMap; use super::{ Immediate, Operand, MemPlace, MPlaceTy, Place, PlaceTy, ScalarMaybeUndef, Memory, Machine }; pub struct InterpretCx<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> { /// Stores the `Machine` instance. pub machine: M, /// The results of the type checker, from rustc. pub tcx: TyCtxtAt<'a, 'tcx, 'tcx>, /// Bounds in scope for polymorphic evaluations. pub(crate) param_env: ty::ParamEnv<'tcx>, /// The virtual memory system. pub(crate) memory: Memory<'a, 'mir, 'tcx, M>, /// The virtual call stack. pub(crate) stack: Vec<Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>>, /// A cache for deduplicating vtables pub(super) vtables: FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), AllocId>, } /// A stack frame. #[derive(Clone)] pub struct Frame<'mir, 'tcx: 'mir, Tag=(), Extra=()> { //////////////////////////////////////////////////////////////////////////////// // Function and callsite information //////////////////////////////////////////////////////////////////////////////// /// The MIR for the function called on this frame. pub mir: &'mir mir::Mir<'tcx>, /// The def_id and substs of the current function. pub instance: ty::Instance<'tcx>, /// The span of the call site. pub span: source_map::Span, //////////////////////////////////////////////////////////////////////////////// // Return place and locals //////////////////////////////////////////////////////////////////////////////// /// Work to perform when returning from this function. pub return_to_block: StackPopCleanup, /// The location where the result of the current stack frame should be written to, /// and its layout in the caller. pub return_place: Option<PlaceTy<'tcx, Tag>>, /// The list of locals for this stack frame, stored in order as /// `[return_ptr, arguments..., variables..., temporaries...]`. /// The locals are stored as `Option<Value>`s. /// `None` represents a local that is currently dead, while a live local /// can either directly contain `Scalar` or refer to some part of an `Allocation`. pub locals: IndexVec<mir::Local, LocalState<'tcx, Tag>>, //////////////////////////////////////////////////////////////////////////////// // Current position within the function //////////////////////////////////////////////////////////////////////////////// /// The block that is currently executed (or will be executed after the above call stacks /// return). pub block: mir::BasicBlock, /// The index of the currently evaluated statement. pub stmt: usize, /// Extra data for the machine. pub extra: Extra, } #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub enum StackPopCleanup { /// Jump to the next block in the caller, or cause UB if None (that's a function /// that may never return). Also store layout of return place so /// we can validate it at that layout. Goto(Option<mir::BasicBlock>), /// Just do nohing: Used by Main and for the box_alloc hook in miri. /// `cleanup` says whether locals are deallocated. 
Static computation /// wants them leaked to intern what they need (and just throw away /// the entire `ecx` when it is done). None { cleanup: bool }, } /// State of a local variable including a memoized layout #[derive(Clone, PartialEq, Eq)] pub struct LocalState<'tcx, Tag=(), Id=AllocId> { pub value: LocalValue<Tag, Id>, /// Don't modify if `Some`, this is only used to prevent computing the layout twice pub layout: Cell<Option<TyLayout<'tcx>>>, } /// Current value of a local variable #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub enum LocalValue<Tag=(), Id=AllocId> { /// This local is not currently alive, and cannot be used at all. Dead, /// This local is alive but not yet initialized. It can be written to /// but not read from or its address taken. Locals get initialized on /// first write because for unsized locals, we do not know their size /// before that. Uninitialized, /// A normal, live local. /// Mostly for convenience, we re-use the `Operand` type here. /// This is an optimization over just always having a pointer here; /// we can thus avoid doing an allocation when the local just stores /// immediate values *and* never has its address taken. Live(Operand<Tag, Id>), } impl<'tcx, Tag: Copy + 'static> LocalState<'tcx, Tag> { pub fn access(&self) -> EvalResult<'tcx, Operand<Tag>> { match self.value { LocalValue::Dead => err!(DeadLocal), LocalValue::Uninitialized => bug!("The type checker should prevent reading from a never-written local"), LocalValue::Live(val) => Ok(val), } } /// Overwrite the local. If the local can be overwritten in place, return a reference /// to do so; otherwise return the `MemPlace` to consult instead. pub fn access_mut( &mut self, ) -> EvalResult<'tcx, Result<&mut LocalValue<Tag>, MemPlace<Tag>>> { match self.value { LocalValue::Dead => err!(DeadLocal), LocalValue::Live(Operand::Indirect(mplace)) => Ok(Err(mplace)), ref mut local @ LocalValue::Live(Operand::Immediate(_)) | ref mut local @ LocalValue::Uninitialized => { Ok(Ok(local)) } } } } impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout for InterpretCx<'a, 'mir, 'tcx, M> { #[inline] fn data_layout(&self) -> &layout::TargetDataLayout { &self.tcx.data_layout } } impl<'a, 'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for InterpretCx<'a, 'mir, 'tcx, M> where M: Machine<'a, 'mir, 'tcx> { #[inline] fn tcx<'d>(&'d self) -> TyCtxt<'d, 'tcx, 'tcx> { *self.tcx } } impl<'a, 'mir, 'tcx, M> layout::HasParamEnv<'tcx> for InterpretCx<'a, 'mir, 'tcx, M> where M: Machine<'a, 'mir, 'tcx> { fn param_env(&self) -> ty::ParamEnv<'tcx> { self.param_env } } impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> LayoutOf for InterpretCx<'a, 'mir, 'tcx, M> { type Ty = Ty<'tcx>; type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>; #[inline] fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout { self.tcx.layout_of(self.param_env.and(ty)) .map_err(|layout| InterpError::Layout(layout).into()) } } impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M> { pub fn new( tcx: TyCtxtAt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, machine: M, ) -> Self { InterpretCx { machine, tcx, param_env, memory: Memory::new(tcx), stack: Vec::new(), vtables: FxHashMap::default(), } } #[inline(always)] pub fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> { &self.memory } #[inline(always)] pub fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> { &mut self.memory } #[inline(always)] pub fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>] { &self.stack } #[inline(always)] pub fn cur_frame(&self) -> 
usize { assert!(self.stack.len() > 0); self.stack.len() - 1 } #[inline(always)] pub fn frame(&self) -> &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> { self.stack.last().expect("no call frames exist") } #[inline(always)] pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> { self.stack.last_mut().expect("no call frames exist") } #[inline(always)] pub(super) fn mir(&self) -> &'mir mir::Mir<'tcx> { self.frame().mir } pub(super) fn subst_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>( &self, substs: T, ) -> EvalResult<'tcx, T> { match self.stack.last() { Some(frame) => Ok(self.tcx.subst_and_normalize_erasing_regions( frame.instance.substs, self.param_env, &substs, )), None => if substs.needs_subst() { err!(TooGeneric).into() } else { Ok(substs) }, } } pub(super) fn resolve( &self, def_id: DefId, substs: SubstsRef<'tcx> ) -> EvalResult<'tcx, ty::Instance<'tcx>> { trace!("resolve: {:?}, {:#?}", def_id, substs); trace!("param_env: {:#?}", self.param_env); let substs = self.subst_and_normalize_erasing_regions(substs)?; trace!("substs: {:#?}", substs); ty::Instance::resolve( *self.tcx, self.param_env, def_id, substs, ).ok_or_else(|| InterpError::TooGeneric.into()) } pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { ty.is_sized(self.tcx, self.param_env) } pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool { ty.is_freeze(*self.tcx, self.param_env, DUMMY_SP) } pub fn load_mir( &self, instance: ty::InstanceDef<'tcx>, ) -> EvalResult<'tcx, &'tcx mir::Mir<'tcx>> { // do not continue if typeck errors occurred (can only occur in local crate) let did = instance.def_id(); if did.is_local() && self.tcx.has_typeck_tables(did) && self.tcx.typeck_tables_of(did).tainted_by_errors { return err!(TypeckError); } trace!("load mir {:?}", instance); match instance { ty::InstanceDef::Item(def_id) => if self.tcx.is_mir_available(did) { Ok(self.tcx.optimized_mir(did)) } else { err!(NoMirFor(self.tcx.def_path_str(def_id))) }, _ => Ok(self.tcx.instance_mir(instance)), } } pub(super) fn monomorphize<T: TypeFoldable<'tcx> + Subst<'tcx>>( &self, t: T, ) -> EvalResult<'tcx, T> { match self.stack.last() { Some(frame) => Ok(self.monomorphize_with_substs(t, frame.instance.substs)), None => if t.needs_subst() { err!(TooGeneric).into() } else { Ok(t) }, } } fn monomorphize_with_substs<T: TypeFoldable<'tcx> + Subst<'tcx>>( &self, t: T, substs: SubstsRef<'tcx> ) -> T { // miri doesn't care about lifetimes, and will choke on some crazy ones // let's simply get rid of them let substituted = t.subst(*self.tcx, substs); self.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), substituted) } pub fn layout_of_local( &self, frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>, local: mir::Local, layout: Option<TyLayout<'tcx>>, ) -> EvalResult<'tcx, TyLayout<'tcx>> { match frame.locals[local].layout.get() { None => { let layout = crate::interpret::operand::from_known_layout(layout, || { let local_ty = frame.mir.local_decls[local].ty; let local_ty = self.monomorphize_with_substs(local_ty, frame.instance.substs); self.layout_of(local_ty) })?; // Layouts of locals are requested a lot, so we cache them. 
frame.locals[local].layout.set(Some(layout)); Ok(layout) } Some(layout) => Ok(layout), } } pub fn str_to_immediate(&mut self, s: &str) -> EvalResult<'tcx, Immediate<M::PointerTag>> { let ptr = self.memory.allocate_static_bytes(s.as_bytes()).with_default_tag(); Ok(Immediate::new_slice(Scalar::Ptr(ptr), s.len() as u64, self)) } /// Returns the actual dynamic size and alignment of the place at the given type. /// Only the "meta" (metadata) part of the place matters. /// This can fail to provide an answer for extern types. pub(super) fn size_and_align_of( &self, metadata: Option<Scalar<M::PointerTag>>, layout: TyLayout<'tcx>, ) -> EvalResult<'tcx, Option<(Size, Align)>> { if !layout.is_unsized() { return Ok(Some((layout.size, layout.align.abi))); } match layout.ty.sty { ty::Adt(..) | ty::Tuple(..) => { // First get the size of all statically known fields. // Don't use type_of::sizing_type_of because that expects t to be sized, // and it also rounds up to alignment, which we want to avoid, // as the unsized field's alignment could be smaller. assert!(!layout.ty.is_simd()); trace!("DST layout: {:?}", layout); let sized_size = layout.fields.offset(layout.fields.count() - 1); let sized_align = layout.align.abi; trace!( "DST {} statically sized prefix size: {:?} align: {:?}", layout.ty, sized_size, sized_align ); // Recurse to get the size of the dynamically sized field (must be // the last field). Can't have foreign types here, how would we // adjust alignment and size for them? let field = layout.field(self, layout.fields.count() - 1)?; let (unsized_size, unsized_align) = match self.size_and_align_of(metadata, field)? { Some(size_and_align) => size_and_align, None => { // A field with extern type. If this field is at offset 0, we behave // like the underlying extern type. // FIXME: Once we have made decisions for how to handle size and alignment // of `extern type`, this should be adapted. It is just a temporary hack // to get some code to work that probably ought to work. if sized_size == Size::ZERO { return Ok(None) } else { bug!("Fields cannot be extern types, unless they are at offset 0") } } }; // FIXME (#26403, #27023): We should be adding padding // to `sized_size` (to accommodate the `unsized_align` // required of the unsized field that follows) before // summing it with `sized_size`. (Note that since #26403 // is unfixed, we do not yet add the necessary padding // here. But this is where the add would go.) // Return the sum of sizes and max of aligns. let size = sized_size + unsized_size; // Choose max of two known alignments (combined value must // be aligned according to more restrictive of the two). let align = sized_align.max(unsized_align); // Issue #27023: must add any necessary padding to `size` // (to make it a multiple of `align`) before returning it. // // Namely, the returned size should be, in C notation: // // `size + ((size & (align-1)) ? align : 0)` // // emulated via the semi-standard fast bit trick: // // `(size + (align-1)) & -align` Ok(Some((size.align_to(align), align))) } ty::Dynamic(..) => { let vtable = metadata.expect("dyn trait fat ptr must have vtable").to_ptr()?; // the second entry in the vtable is the dynamic size of the object. 
Ok(Some(self.read_size_and_align_from_vtable(vtable)?)) } ty::Slice(_) | ty::Str => { let len = metadata.expect("slice fat ptr must have vtable").to_usize(self)?; let elem = layout.field(self, 0)?; Ok(Some((elem.size * len, elem.align.abi))) } ty::Foreign(_) => { Ok(None) } _ => bug!("size_and_align_of::<{:?}> not supported", layout.ty), } } #[inline] pub fn size_and_align_of_mplace( &self, mplace: MPlaceTy<'tcx, M::PointerTag> ) -> EvalResult<'tcx, Option<(Size, Align)>> { self.size_and_align_of(mplace.meta, mplace.layout) } pub fn push_stack_frame( &mut self, instance: ty::Instance<'tcx>, span: source_map::Span, mir: &'mir mir::Mir<'tcx>, return_place: Option<PlaceTy<'tcx, M::PointerTag>>, return_to_block: StackPopCleanup, ) -> EvalResult<'tcx> { if self.stack.len() > 0 { info!("PAUSING({}) {}", self.cur_frame(), self.frame().instance); } ::log_settings::settings().indentation += 1; // first push a stack frame so we have access to the local substs let extra = M::stack_push(self)?; self.stack.push(Frame { mir, block: mir::START_BLOCK, return_to_block, return_place, // empty local array, we fill it in below, after we are inside the stack frame and // all methods actually know about the frame locals: IndexVec::new(), span, instance, stmt: 0, extra, }); // don't allocate at all for trivial constants if mir.local_decls.len() > 1 { // Locals are initially uninitialized. let dummy = LocalState { value: LocalValue::Uninitialized, layout: Cell::new(None), }; let mut locals = IndexVec::from_elem(dummy, &mir.local_decls); // Return place is handled specially by the `eval_place` functions, and the // entry in `locals` should never be used. Make it dead, to be sure. locals[mir::RETURN_PLACE].value = LocalValue::Dead; // Now mark those locals as dead that we do not want to initialize match self.tcx.def_kind(instance.def_id()) { // statics and constants don't have `Storage*` statements, no need to look for them Some(DefKind::Static) | Some(DefKind::Const) | Some(DefKind::AssociatedConst) => {}, _ => { trace!("push_stack_frame: {:?}: num_bbs: {}", span, mir.basic_blocks().len()); for block in mir.basic_blocks() { for stmt in block.statements.iter() { use rustc::mir::StatementKind::{StorageDead, StorageLive}; match stmt.kind { StorageLive(local) | StorageDead(local) => { locals[local].value = LocalValue::Dead; } _ => {} } } } }, } // done self.frame_mut().locals = locals; } info!("ENTERING({}) {}", self.cur_frame(), self.frame().instance); if self.stack.len() > self.tcx.sess.const_eval_stack_frame_limit { err!(StackFrameLimitReached) } else { Ok(()) } } pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> { info!("LEAVING({}) {}", self.cur_frame(), self.frame().instance); ::log_settings::settings().indentation -= 1; let frame = self.stack.pop().expect( "tried to pop a stack frame, but there were none", ); M::stack_pop(self, frame.extra)?; // Abort early if we do not want to clean up: We also avoid validation in that case, // because this is CTFE and the final value will be thoroughly validated anyway. match frame.return_to_block { StackPopCleanup::Goto(_) => {}, StackPopCleanup::None { cleanup } => { if !cleanup { assert!(self.stack.is_empty(), "only the topmost frame should ever be leaked"); // Leak the locals, skip validation. return Ok(()); } } } // Deallocate all locals that are backed by an allocation. for local in frame.locals { self.deallocate_local(local.value)?; } // Validate the return value. Do this after deallocating so that we catch dangling // references. 
if let Some(return_place) = frame.return_place { if M::enforce_validity(self) { // Data got changed, better make sure it matches the type! // It is still possible that the return place held invalid data while // the function is running, but that's okay because nobody could have // accessed that same data from the "outside" to observe any broken // invariant -- that is, unless a function somehow has a ptr to // its return place... but the way MIR is currently generated, the // return place is always a local and then this cannot happen. self.validate_operand( self.place_to_op(return_place)?, vec![], None, /*const_mode*/false, )?; } } else { // Uh, that shouldn't happen... the function did not intend to return return err!(Unreachable); } // Jump to new block -- *after* validation so that the spans make more sense. match frame.return_to_block { StackPopCleanup::Goto(block) => { self.goto_block(block)?; } StackPopCleanup::None { .. } => {} } if self.stack.len() > 0 { info!("CONTINUING({}) {}", self.cur_frame(), self.frame().instance); } Ok(()) } /// Mark a storage as live, killing the previous content and returning it. /// Remember to deallocate that! pub fn storage_live( &mut self, local: mir::Local ) -> EvalResult<'tcx, LocalValue<M::PointerTag>> { assert!(local != mir::RETURN_PLACE, "Cannot make return place live"); trace!("{:?} is now live", local); let local_val = LocalValue::Uninitialized; // StorageLive *always* kills the value that's currently stored Ok(mem::replace(&mut self.frame_mut().locals[local].value, local_val)) } /// Returns the old value of the local. /// Remember to deallocate that! pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue<M::PointerTag> { assert!(local != mir::RETURN_PLACE, "Cannot make return place dead"); trace!("{:?} is now dead", local); mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead) } pub(super) fn deallocate_local( &mut self, local: LocalValue<M::PointerTag>, ) -> EvalResult<'tcx> { // FIXME: should we tell the user that there was a local which was never written to? if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local { trace!("deallocating local"); let ptr = ptr.to_ptr()?; self.memory.dump_alloc(ptr.alloc_id); self.memory.deallocate_local(ptr)?; }; Ok(()) } pub fn const_eval_raw( &self, gid: GlobalId<'tcx>, ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { let param_env = if self.tcx.is_static(gid.instance.def_id()) { ty::ParamEnv::reveal_all() } else { self.param_env }; // We use `const_eval_raw` here, and get an unvalidated result. That is okay: // Our result will later be validated anyway, and there seems no good reason // to have to fail early here. This is also more consistent with // `Memory::get_static_alloc` which has to use `const_eval_raw` to avoid cycles. 
let val = self.tcx.const_eval_raw(param_env.and(gid)).map_err(|err| { match err { ErrorHandled::Reported => InterpError::ReferencedConstant, ErrorHandled::TooGeneric => InterpError::TooGeneric, } })?; self.raw_const_to_mplace(val) } pub fn dump_place(&self, place: Place<M::PointerTag>) { // Debug output if !log_enabled!(::log::Level::Trace) { return; } match place { Place::Local { frame, local } => { let mut allocs = Vec::new(); let mut msg = format!("{:?}", local); if frame != self.cur_frame() { write!(msg, " ({} frames up)", self.cur_frame() - frame).unwrap(); } write!(msg, ":").unwrap(); match self.stack[frame].locals[local].value { LocalValue::Dead => write!(msg, " is dead").unwrap(), LocalValue::Uninitialized => write!(msg, " is uninitialized").unwrap(), LocalValue::Live(Operand::Indirect(mplace)) => { match mplace.ptr { Scalar::Ptr(ptr) => { write!(msg, " by align({}){} ref:", mplace.align.bytes(), match mplace.meta { Some(meta) => format!(" meta({:?})", meta), None => String::new() } ).unwrap(); allocs.push(ptr.alloc_id); } ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(), } } LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => { write!(msg, " {:?}", val).unwrap(); if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val { allocs.push(ptr.alloc_id); } } LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => { write!(msg, " ({:?}, {:?})", val1, val2).unwrap(); if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val1 { allocs.push(ptr.alloc_id); } if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val2 { allocs.push(ptr.alloc_id); } } } trace!("{}", msg); self.memory.dump_allocs(allocs); } Place::Ptr(mplace) => { match mplace.ptr { Scalar::Ptr(ptr) => { trace!("by align({}) ref:", mplace.align.bytes()); self.memory.dump_alloc(ptr.alloc_id); } ptr => trace!(" integral by ref: {:?}", ptr), } } } } pub fn generate_stacktrace(&self, explicit_span: Option<Span>) -> Vec<FrameInfo<'tcx>> { let mut last_span = None; let mut frames = Vec::new(); for &Frame { instance, span, mir, block, stmt, .. } in self.stack().iter().rev() { // make sure we don't emit frames that are duplicates of the previous if explicit_span == Some(span) { last_span = Some(span); continue; } if let Some(last) = last_span { if last == span { continue; } } else { last_span = Some(span); } let block = &mir.basic_blocks()[block]; let source_info = if stmt < block.statements.len() { block.statements[stmt].source_info } else { block.terminator().source_info }; let lint_root = match mir.source_scope_local_data { mir::ClearCrossCrate::Set(ref ivs) => Some(ivs[source_info.scope].lint_root), mir::ClearCrossCrate::Clear => None, }; frames.push(FrameInfo { call_site: span, instance, lint_root }); } trace!("generate stacktrace: {:#?}, {:?}", frames, explicit_span); frames } #[inline(always)] pub fn sign_extend(&self, value: u128, ty: TyLayout<'_>) -> u128 { assert!(ty.abi.is_signed()); sign_extend(value, ty.size) } #[inline(always)] pub fn truncate(&self, value: u128, ty: TyLayout<'_>) -> u128 { truncate(value, ty.size) } }
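A standalone sketch of the bit arithmetic behind the `sign_extend` / `truncate` helpers that close this interpreter impl. The free functions and the `size_bits` parameter are illustrative stand-ins for the rustc-internal versions (which take a `Size`):

```rust
// Sign-extend / truncate a value stored in the low `size_bits` bits of a
// u128, mirroring the interpreter helpers above (a sketch, not rustc's code).
fn sign_extend(value: u128, size_bits: u32) -> u128 {
    assert!(size_bits > 0 && size_bits <= 128);
    let shift = 128 - size_bits;
    // Move the value's sign bit up to bit 127, then arithmetic-shift it back.
    (((value << shift) as i128) >> shift) as u128
}

fn truncate(value: u128, size_bits: u32) -> u128 {
    if size_bits == 128 {
        return value;
    }
    // Mask off everything above the low `size_bits` bits.
    value & ((1u128 << size_bits) - 1)
}

fn main() {
    // 0xFF read as an 8-bit signed value is -1, i.e. all ones at 128 bits.
    assert_eq!(sign_extend(0xFF, 8), u128::MAX);
    assert_eq!(truncate(0x1FF, 8), 0xFF);
}
```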
38.810881
100
0.523263
1e25a818169eddfcf482aae5d50892884d716826
1,741
//! Watch example

use etcd_client::*;
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), Error> {
    let mut client = Client::connect(["localhost:2379"], None).await?;

    client.put("foo", "bar", None).await?;
    println!("put kv: {{foo: bar}}");
    client.put("foo1", "bar1", None).await?;
    println!("put kv: {{foo1: bar1}}");

    let (mut watcher, mut stream) = client.watch("foo", None).await?;
    println!("create watcher {}", watcher.watch_id());
    println!();

    client.put("foo", "bar2", None).await?;
    watcher.request_progress().await?;
    client.delete("foo", None).await?;

    watcher.watch("foo1", None).await?;
    tokio::time::sleep(Duration::from_secs(1)).await;
    client.put("foo1", "bar2", None).await?;
    client.delete("foo1", None).await?;

    let mut watch_count = 2;
    while let Some(resp) = stream.message().await? {
        println!("[{}] receive watch response", resp.watch_id());
        println!("compact revision: {}", resp.compact_revision());

        if resp.created() {
            println!("watcher created: {}", resp.watch_id());
        }

        if resp.canceled() {
            watch_count -= 1;
            println!("watch canceled: {}", resp.watch_id());
            if watch_count == 0 {
                break;
            }
        }

        for event in resp.events() {
            println!("event type: {:?}", event.event_type());
            if let Some(kv) = event.kv() {
                println!("kv: {{{}: {}}}", kv.key_str()?, kv.value_str()?);
            }

            if EventType::Delete == event.event_type() {
                watcher.cancel_by_id(resp.watch_id()).await?;
            }
        }

        println!();
    }

    Ok(())
}
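The example above registers two separate watches; etcd can also cover a whole key range with one watcher. A minimal sketch, assuming `etcd-client`'s `WatchOptions::new().with_prefix()` builder (it follows the same options pattern as the `None` arguments above):

```rust
use etcd_client::{Client, Error, WatchOptions};

#[tokio::main]
async fn main() -> Result<(), Error> {
    let mut client = Client::connect(["localhost:2379"], None).await?;

    // One watcher for every key starting with "foo" ("foo", "foo1", ...).
    let (_watcher, mut stream) = client
        .watch("foo", Some(WatchOptions::new().with_prefix()))
        .await?;

    client.put("foo1", "bar", None).await?;

    if let Some(resp) = stream.message().await? {
        println!("got {} event(s)", resp.events().len());
    }
    Ok(())
}
```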
27.634921
75
0.527283
e95365e63a803a21f2292ffb915d68c548f408f3
1,351
use once_cell::sync::Lazy;
use regex::{Captures, Regex};
use std::collections::HashMap;
use tera::{self, Value};

static RE_CODIFY: Lazy<Regex> = Lazy::new(|| Regex::new(r"\&#96;(.+?)\&#96;").unwrap());

pub fn codify(value: &Value, _: &HashMap<String, Value>) -> tera::Result<Value> {
    let value = match value {
        Value::String(s) => s,
        _ => return Err(format!("unsupported value for codify: {:?}", value).into()),
    };

    let result = RE_CODIFY.replace_all(&value, |captures: &Captures| {
        format!("<code>{}</code>", captures.get(1).unwrap().as_str())
    });

    Ok(result.into())
}

pub fn pr_url(value: &Value, _: &HashMap<String, Value>) -> tera::Result<Value> {
    let number = get_issue_number(value)?;
    Ok(format!("https://github.com/rust-lang/rust/pull/{}", number).into())
}

pub fn issue_url(value: &Value, _: &HashMap<String, Value>) -> tera::Result<Value> {
    let number = get_issue_number(value)?;
    Ok(format!("https://github.com/rust-lang/rust/issues/{}", number).into())
}

fn get_issue_number(value: &Value) -> tera::Result<u64> {
    let number = match value {
        Value::Number(n) => n
            .as_u64()
            .ok_or_else(|| format!("unsupported number: {:?}", n))?,
        _ => return Err(format!("unsupported value for issue number: {:?}", value).into()),
    };

    Ok(number)
}
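For context, wiring these filters into a `Tera` instance is one `register_filter` call each; Tera implements its `Filter` trait for plain functions with exactly this signature. A sketch, assuming this file is a module named `filters` and with an illustrative template glob:

```rust
use tera::Tera;

fn build_tera() -> tera::Result<Tera> {
    let mut tera = Tera::new("templates/**/*.html")?;
    // Function pointers satisfy tera's blanket `Filter` impl directly.
    tera.register_filter("codify", filters::codify);
    tera.register_filter("pr_url", filters::pr_url);
    tera.register_filter("issue_url", filters::issue_url);
    Ok(tera)
}
```

Templates can then pipe values through them, e.g. `{{ number | pr_url }}`.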
35.552632
91
0.601036
7175e466cc6caab193537a26925839ad8f67adf1
4,935
use alloc::boxed::Box; use alloc::string::String; use alloc::vec::Vec; use core::any::Any; use crate::rsp::rsp::RSP; use crate::rsp::rsp_assembler::{E, Element, GPR, RSPAssembler, VR, VSARAccumulator}; use crate::rsp::spmem::SPMEM; use crate::tests::{Level, Test}; use crate::tests::soft_asserts::soft_assert_eq; fn run_test(e: Element, expected_result: [u16; 8], expected_acc_top: [u16; 8], expected_acc_mid: [u16; 8], expected_acc_low: [u16; 8]) -> Result<(), String> { // Prepare input data SPMEM::write_vector16_into_dmem(0x00, &[0x0000, 0x0000, 0x0000, 0xE000, 0x8001, 0x8000, 0x7FFF, 0x8000]); SPMEM::write_vector16_into_dmem(0x10, &[0x0000, 0x0001, 0xFFFF, 0xFFFF, 0x8000, 0x7FFF, 0x7FFF, 0x8000]); // Assemble RSP program. First use VMULF to set accumulator to something known, then use VMUDH let mut assembler = RSPAssembler::new(0); assembler.write_lqv(VR::V0, E::_0, 0x000, GPR::R0); assembler.write_lqv(VR::V1, E::_0, 0x010, GPR::R0); assembler.write_lqv(VR::V6, E::_0, 0x000, GPR::R0); assembler.write_lqv(VR::V7, E::_0, 0x010, GPR::R0); // Ensure something is in the lower accumulator as we expect VMUDH to clear it with zeroes assembler.write_vmulf(VR::V2, VR::V0, VR::V1, e); assembler.write_vmudh(VR::V2, VR::V0, VR::V1, e); assembler.write_vsar(VR::V3, VSARAccumulator::High); assembler.write_vsar(VR::V4, VSARAccumulator::Mid); assembler.write_vsar(VR::V5, VSARAccumulator::Low); // again but this time destructive by overwriting a source reg assembler.write_vmudh(VR::V6, VR::V6, VR::V1, e); assembler.write_vmudh(VR::V7, VR::V0, VR::V7, e); assembler.write_sqv(VR::V2, E::_0, 0x100, GPR::R0); assembler.write_sqv(VR::V3, E::_0, 0x110, GPR::R0); assembler.write_sqv(VR::V4, E::_0, 0x120, GPR::R0); assembler.write_sqv(VR::V5, E::_0, 0x130, GPR::R0); assembler.write_sqv(VR::V6, E::_0, 0x140, GPR::R0); assembler.write_sqv(VR::V7, E::_0, 0x150, GPR::R0); assembler.write_break(); RSP::run_and_wait(0); soft_assert_eq(SPMEM::read_vector16_from_dmem(0x100), expected_result, "VMUDH result")?; soft_assert_eq(SPMEM::read_vector16_from_dmem(0x110), expected_acc_top, "VMUDH Acc[32..48]")?; soft_assert_eq(SPMEM::read_vector16_from_dmem(0x120), expected_acc_mid, "VMUDH Acc[16..32]")?; soft_assert_eq(SPMEM::read_vector16_from_dmem(0x130), expected_acc_low, "VMUDH Acc[0..16]")?; soft_assert_eq(SPMEM::read_vector16_from_dmem(0x140), expected_result, "VMUDH result when doing VMUDH V6, V6, V1")?; soft_assert_eq(SPMEM::read_vector16_from_dmem(0x150), expected_result, "VMUDH result when doing VMUDH V7, V0, V7")?; Ok(()) } pub struct VMUDHAll {} impl Test for VMUDHAll { fn name(&self) -> &str { "RSP VMUDH" } fn level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { run_test( Element::All, [0, 0, 0, 0x2000, 0x7fff, 0x8000, 0x7fff, 0x7fff], [0, 0, 0, 0, 0x3fff, 0xc000, 0x3fff, 0x4000], [0, 0, 0, 0x2000, 0x8000, 0x8000, 0x1, 0x0], [0, 0, 0, 0, 0, 0, 0, 0], ) } } pub struct VMUDHQ1 {} impl Test for VMUDHQ1 { fn name(&self) -> &str { "RSP VMUDH (e=Q1)" } fn level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { run_test( Element::Q1, [0, 0, 0x2000, 0x2000, 0x7FFF, 0x8000, 0x8000, 0x7fff], [0, 0, 0, 0, 0x4000, 0xc000, 0xc000, 0x4000], [0, 0, 0x2000, 0x2000, 0, 0x8000, 0x8000, 0], [0, 0, 0, 0, 0, 0, 0, 0], ) } } pub struct VMUDHH0 {} impl Test for VMUDHH0 { fn name(&self) -> &str { "RSP VMUDH (e=H0)" } fn 
level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { run_test( Element::H0, [0, 0, 0, 0, 0x7FFF, 0x8000, 0x8000, 0x7FFF], [0, 0, 0, 0, 0x3FFF, 0xC000, 0xC000, 0x3FFF], [0, 0, 0, 0, 0x8000, 0xFFFF, 0xFFFF, 0x8000], [0, 0, 0, 0, 0, 0, 0, 0], ) } } pub struct VMUDH7 {} impl Test for VMUDH7 { fn name(&self) -> &str { "RSP VMUDH (e=_7)" } fn level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { run_test( Element::_7, [0, 0x8000, 0x7fff, 0x7fff, 0x7fff, 0x8000, 0x8000, 0x7fff], [0, 0xffff, 0, 0, 0x4000, 0xc000, 0xc000, 0x4000], [0, 0x8000, 0x8000, 0x8000, 0, 0x8000, 0x8000, 0], [0, 0, 0, 0, 0, 0, 0, 0], ) } }
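For readers decoding the expected vectors: a plain-Rust model of what a single VMUDH lane appears to compute, inferred from this test's data rather than taken from RSP documentation. The signed 16x16 product fills the accumulator's high and mid thirds, the low third is zeroed, and the result lane is the product saturated to signed 16 bits:

```rust
/// One VMUDH lane: returns (result, acc_hi, acc_mid, acc_lo).
fn vmudh_lane(vs: u16, vt: u16) -> (u16, u16, u16, u16) {
    let product = (vs as i16 as i32) * (vt as i16 as i32);
    let acc_hi = (product >> 16) as u16;
    let acc_mid = product as u16;
    // The result lane is the product clamped into signed 16-bit range.
    let result = product.clamp(i16::MIN as i32, i16::MAX as i32) as i16 as u16;
    (result, acc_hi, acc_mid, 0)
}

fn main() {
    // Lane 6 of the Element::All case: 0x7FFF * 0x7FFF.
    assert_eq!(vmudh_lane(0x7FFF, 0x7FFF), (0x7FFF, 0x3FFF, 0x0001, 0));
    // Lane 5: 0x8000 * 0x7FFF saturates to the negative limit.
    assert_eq!(vmudh_lane(0x8000, 0x7FFF), (0x8000, 0xC000, 0x8000, 0));
}
```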
35.76087
158
0.606484
0111b05a57c8f4b539da02496f8fa0586cf4aadb
21,165
//! This provides the logic for the finalized and head chains. //! //! Each chain type is stored in it's own map. A variety of helper functions are given along with //! this struct to simplify the logic of the other layers of sync. use super::chain::{ChainId, ProcessingResult, RemoveChain, SyncingChain}; use super::sync_type::RangeSyncType; use crate::beacon_processor::WorkEvent as BeaconWorkEvent; use crate::metrics; use crate::sync::network_context::SyncNetworkContext; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::PeerId; use eth2_libp2p::SyncInfo; use fnv::FnvHashMap; use slog::{crit, debug, error}; use smallvec::SmallVec; use std::collections::hash_map::Entry; use std::collections::HashMap; use std::sync::Arc; use tokio::sync::mpsc; use types::EthSpec; use types::{Epoch, Hash256, Slot}; /// The number of head syncing chains to sync at a time. const PARALLEL_HEAD_CHAINS: usize = 2; /// Minimum work we require a finalized chain to do before picking a chain with more peers. const MIN_FINALIZED_CHAIN_VALIDATED_EPOCHS: u64 = 10; /// The state of the long range/batch sync. #[derive(Clone)] pub enum RangeSyncState { /// A finalized chain is being synced. Finalized(u64), /// There are no finalized chains and we are syncing one more head chains. Head(SmallVec<[u64; PARALLEL_HEAD_CHAINS]>), /// There are no head or finalized chains and no long range sync is in progress. Idle, } /// A collection of finalized and head chains currently being processed. pub struct ChainCollection<T: BeaconChainTypes> { /// The beacon chain for processing. beacon_chain: Arc<BeaconChain<T>>, /// The set of finalized chains being synced. finalized_chains: FnvHashMap<ChainId, SyncingChain<T>>, /// The set of head chains being synced. head_chains: FnvHashMap<ChainId, SyncingChain<T>>, /// The current sync state of the process. state: RangeSyncState, /// Logger for the collection. log: slog::Logger, } impl<T: BeaconChainTypes> ChainCollection<T> { pub fn new(beacon_chain: Arc<BeaconChain<T>>, log: slog::Logger) -> Self { ChainCollection { beacon_chain, finalized_chains: FnvHashMap::default(), head_chains: FnvHashMap::default(), state: RangeSyncState::Idle, log, } } /// Updates the Syncing state of the collection after a chain is removed. fn on_chain_removed(&mut self, id: &ChainId, was_syncing: bool, sync_type: RangeSyncType) { let _ = metrics::get_int_gauge(&metrics::SYNCING_CHAINS_COUNT, &[sync_type.as_str()]) .map(|m| m.dec()); match self.state { RangeSyncState::Finalized(ref syncing_id) => { if syncing_id == id { // the finalized chain that was syncing was removed debug_assert!(was_syncing); let syncing_head_ids: SmallVec<[u64; PARALLEL_HEAD_CHAINS]> = self .head_chains .iter() .filter(|(_id, chain)| chain.is_syncing()) .map(|(id, _)| *id) .collect(); self.state = if syncing_head_ids.is_empty() { RangeSyncState::Idle } else { RangeSyncState::Head(syncing_head_ids) }; } else { debug_assert!(!was_syncing); } } RangeSyncState::Head(ref mut syncing_head_ids) => { if let Some(index) = syncing_head_ids .iter() .enumerate() .find(|(_, &chain_id)| &chain_id == id) .map(|(i, _)| i) { // a syncing head chain was removed debug_assert!(was_syncing); syncing_head_ids.swap_remove(index); if syncing_head_ids.is_empty() { self.state = RangeSyncState::Idle; } } else { debug_assert!(!was_syncing); } } RangeSyncState::Idle => { // the removed chain should not be syncing debug_assert!(!was_syncing) } } } /// Calls `func` on every chain of the collection. 
If the result is /// `ProcessingResult::RemoveChain`, the chain is removed and returned. /// NOTE: `func` must not change the syncing state of a chain. pub fn call_all<F>(&mut self, mut func: F) -> Vec<(SyncingChain<T>, RangeSyncType, RemoveChain)> where F: FnMut(&mut SyncingChain<T>) -> ProcessingResult, { let mut to_remove = Vec::new(); for (id, chain) in self.finalized_chains.iter_mut() { if let Err(remove_reason) = func(chain) { to_remove.push((*id, RangeSyncType::Finalized, remove_reason)); } } for (id, chain) in self.head_chains.iter_mut() { if let Err(remove_reason) = func(chain) { to_remove.push((*id, RangeSyncType::Head, remove_reason)); } } let mut results = Vec::with_capacity(to_remove.len()); for (id, sync_type, reason) in to_remove.into_iter() { let chain = match sync_type { RangeSyncType::Finalized => self.finalized_chains.remove(&id), RangeSyncType::Head => self.head_chains.remove(&id), }; let chain = chain.expect("Chain exists"); self.on_chain_removed(&id, chain.is_syncing(), sync_type); results.push((chain, sync_type, reason)); } results } /// Executes a function on the chain with the given id. /// /// If the function returns `ProcessingResult::RemoveChain`, the chain is removed and returned. /// If the chain is found, its syncing type is returned, or an error otherwise. /// NOTE: `func` should not change the sync state of a chain. #[allow(clippy::type_complexity)] pub fn call_by_id<F>( &mut self, id: ChainId, func: F, ) -> Result<(Option<(SyncingChain<T>, RemoveChain)>, RangeSyncType), ()> where F: FnOnce(&mut SyncingChain<T>) -> ProcessingResult, { if let Entry::Occupied(mut entry) = self.finalized_chains.entry(id) { // Search in our finalized chains first if let Err(remove_reason) = func(entry.get_mut()) { let chain = entry.remove(); self.on_chain_removed(&id, chain.is_syncing(), RangeSyncType::Finalized); Ok((Some((chain, remove_reason)), RangeSyncType::Finalized)) } else { Ok((None, RangeSyncType::Finalized)) } } else if let Entry::Occupied(mut entry) = self.head_chains.entry(id) { // Search in our head chains next if let Err(remove_reason) = func(entry.get_mut()) { let chain = entry.remove(); self.on_chain_removed(&id, chain.is_syncing(), RangeSyncType::Head); Ok((Some((chain, remove_reason)), RangeSyncType::Head)) } else { Ok((None, RangeSyncType::Head)) } } else { // Chain was not found in the finalized collection, nor the head collection Err(()) } } /// Updates the state of the chain collection. /// /// This removes any out-dated chains, swaps to any higher priority finalized chains and /// updates the state of the collection. This starts head chains syncing if any are required to /// do so. pub fn update( &mut self, network: &mut SyncNetworkContext<T::EthSpec>, local: &SyncInfo, awaiting_head_peers: &mut HashMap<PeerId, SyncInfo>, beacon_processor_send: &mpsc::Sender<BeaconWorkEvent<T::EthSpec>>, ) { // Remove any outdated finalized/head chains self.purge_outdated_chains(local, awaiting_head_peers); let local_head_epoch = local.head_slot.epoch(T::EthSpec::slots_per_epoch()); // Choose the best finalized chain if one needs to be selected. self.update_finalized_chains(network, local.finalized_epoch, local_head_epoch); if !matches!(self.state, RangeSyncState::Finalized(_)) { // Handle head syncing chains if there are no finalized chains left. 
self.update_head_chains( network, local.finalized_epoch, local_head_epoch, awaiting_head_peers, beacon_processor_send, ); } } pub fn state( &self, ) -> Result<Option<(RangeSyncType, Slot /* from */, Slot /* to */)>, &'static str> { match self.state { RangeSyncState::Finalized(ref syncing_id) => { let chain = self .finalized_chains .get(syncing_id) .ok_or("Finalized syncing chain not found")?; Ok(Some(( RangeSyncType::Finalized, chain.start_epoch.start_slot(T::EthSpec::slots_per_epoch()), chain.target_head_slot, ))) } RangeSyncState::Head(ref syncing_head_ids) => { let mut range: Option<(Slot, Slot)> = None; for id in syncing_head_ids { let chain = self .head_chains .get(id) .ok_or("Head syncing chain not found")?; let start = chain.start_epoch.start_slot(T::EthSpec::slots_per_epoch()); let target = chain.target_head_slot; range = range .map(|(min_start, max_slot)| (min_start.min(start), max_slot.max(target))) .or(Some((start, target))); } let (start_slot, target_slot) = range.ok_or("Syncing head with empty head ids")?; Ok(Some((RangeSyncType::Head, start_slot, target_slot))) } RangeSyncState::Idle => Ok(None), } } /// This looks at all current finalized chains and decides if a new chain should be prioritised /// or not. fn update_finalized_chains( &mut self, network: &mut SyncNetworkContext<T::EthSpec>, local_epoch: Epoch, local_head_epoch: Epoch, ) { // Find the chain with most peers and check if it is already syncing if let Some((mut new_id, max_peers)) = self .finalized_chains .iter() .max_by_key(|(_, chain)| chain.available_peers()) .map(|(id, chain)| (*id, chain.available_peers())) { let mut old_id = None; if let RangeSyncState::Finalized(syncing_id) = self.state { if syncing_id == new_id { // best chain is already syncing old_id = Some(None); } else { // chains are different, check that they don't have the same number of peers if let Some(syncing_chain) = self.finalized_chains.get_mut(&syncing_id) { if max_peers > syncing_chain.available_peers() && syncing_chain.validated_epochs() > MIN_FINALIZED_CHAIN_VALIDATED_EPOCHS { syncing_chain.stop_syncing(); old_id = Some(Some(syncing_id)); } else { // chains have the same number of peers, pick the currently syncing // chain to avoid unnecesary switchings and try to advance it new_id = syncing_id; old_id = Some(None); } } } } let chain = self .finalized_chains .get_mut(&new_id) .expect("Chain exists"); match old_id { Some(Some(old_id)) => debug!(self.log, "Switching finalized chains"; "old_id" => old_id, &chain), None => debug!(self.log, "Syncing new finalized chain"; &chain), Some(None) => { // this is the same chain. We try to advance it. } } // update the state to a new finalized state self.state = RangeSyncState::Finalized(new_id); if let Err(remove_reason) = chain.start_syncing(network, local_epoch, local_head_epoch) { if remove_reason.is_critical() { crit!(self.log, "Chain removed while switching chains"; "chain" => new_id, "reason" => ?remove_reason); } else { // this happens only if sending a batch over the `network` fails a lot error!(self.log, "Chain removed while switching chains"; "chain" => new_id, "reason" => ?remove_reason); } self.finalized_chains.remove(&new_id); self.on_chain_removed(&new_id, true, RangeSyncType::Finalized); } } } /// Start syncing any head chains if required. 
fn update_head_chains( &mut self, network: &mut SyncNetworkContext<T::EthSpec>, local_epoch: Epoch, local_head_epoch: Epoch, awaiting_head_peers: &mut HashMap<PeerId, SyncInfo>, beacon_processor_send: &mpsc::Sender<BeaconWorkEvent<T::EthSpec>>, ) { // Include the awaiting head peers for (peer_id, peer_sync_info) in awaiting_head_peers.drain() { debug!(self.log, "including head peer"); self.add_peer_or_create_chain( local_epoch, peer_sync_info.head_root, peer_sync_info.head_slot, peer_id, RangeSyncType::Head, beacon_processor_send, network, ); } if self.head_chains.is_empty() { // There are no finalized chains, update the state. self.state = RangeSyncState::Idle; return; } // Order chains by available peers, if two chains have the same number of peers, prefer one // that is already syncing let mut preferred_ids = self .head_chains .iter() .map(|(id, chain)| (chain.available_peers(), !chain.is_syncing(), *id)) .collect::<Vec<_>>(); preferred_ids.sort_unstable(); let mut syncing_chains = SmallVec::<[u64; PARALLEL_HEAD_CHAINS]>::new(); for (_, _, id) in preferred_ids { let chain = self.head_chains.get_mut(&id).expect("known chain"); if syncing_chains.len() < PARALLEL_HEAD_CHAINS { // start this chain if it's not already syncing if !chain.is_syncing() { debug!(self.log, "New head chain started syncing"; &chain); } if let Err(remove_reason) = chain.start_syncing(network, local_epoch, local_head_epoch) { self.head_chains.remove(&id); if remove_reason.is_critical() { crit!(self.log, "Chain removed while switching head chains"; "chain" => id, "reason" => ?remove_reason); } else { error!(self.log, "Chain removed while switching head chains"; "chain" => id, "reason" => ?remove_reason); } } else { syncing_chains.push(id); } } else { // stop any other chain chain.stop_syncing(); } } self.state = if syncing_chains.is_empty() { RangeSyncState::Idle } else { RangeSyncState::Head(syncing_chains) }; } /// Returns if `true` if any finalized chains exist, `false` otherwise. pub fn is_finalizing_sync(&self) -> bool { !self.finalized_chains.is_empty() } /// Removes any outdated finalized or head chains. /// This removes chains with no peers, or chains whose start block slot is less than our current /// finalized block slot. Peers that would create outdated chains are removed too. 
pub fn purge_outdated_chains( &mut self, local_info: &SyncInfo, awaiting_head_peers: &mut HashMap<PeerId, SyncInfo>, ) { let local_finalized_slot = local_info .finalized_epoch .start_slot(T::EthSpec::slots_per_epoch()); let beacon_chain = &self.beacon_chain; let log_ref = &self.log; let is_outdated = |target_slot: &Slot, target_root: &Hash256| { target_slot <= &local_finalized_slot || beacon_chain.fork_choice.read().contains_block(target_root) }; // Retain only head peers that remain relevant awaiting_head_peers.retain(|_peer_id, peer_sync_info| { !is_outdated(&peer_sync_info.head_slot, &peer_sync_info.head_root) }); // Remove chains that are out-dated let mut removed_chains = Vec::new(); self.finalized_chains.retain(|id, chain| { if is_outdated(&chain.target_head_slot, &chain.target_head_root) || chain.available_peers() == 0 { debug!(log_ref, "Purging out of finalized chain"; &chain); removed_chains.push((*id, chain.is_syncing(), RangeSyncType::Finalized)); false } else { true } }); self.head_chains.retain(|id, chain| { if is_outdated(&chain.target_head_slot, &chain.target_head_root) || chain.available_peers() == 0 { debug!(log_ref, "Purging out of date head chain"; &chain); removed_chains.push((*id, chain.is_syncing(), RangeSyncType::Head)); false } else { true } }); // update the state of the collection for (id, was_syncing, sync_type) in removed_chains { self.on_chain_removed(&id, was_syncing, sync_type); } } /// Adds a peer to a chain with the given target, or creates a new syncing chain if it doesn't /// exists. #[allow(clippy::too_many_arguments)] pub fn add_peer_or_create_chain( &mut self, start_epoch: Epoch, target_head_root: Hash256, target_head_slot: Slot, peer: PeerId, sync_type: RangeSyncType, beacon_processor_send: &mpsc::Sender<BeaconWorkEvent<T::EthSpec>>, network: &mut SyncNetworkContext<T::EthSpec>, ) { let id = SyncingChain::<T>::id(&target_head_root, &target_head_slot); let collection = if let RangeSyncType::Finalized = sync_type { &mut self.finalized_chains } else { &mut self.head_chains }; match collection.entry(id) { Entry::Occupied(mut entry) => { let chain = entry.get_mut(); debug!(self.log, "Adding peer to known chain"; "peer_id" => %peer, "sync_type" => ?sync_type, &chain); debug_assert_eq!(chain.target_head_root, target_head_root); debug_assert_eq!(chain.target_head_slot, target_head_slot); if let Err(remove_reason) = chain.add_peer(network, peer) { if remove_reason.is_critical() { error!(self.log, "Chain removed after adding peer"; "chain" => id, "reason" => ?remove_reason); } else { error!(self.log, "Chain removed after adding peer"; "chain" => id, "reason" => ?remove_reason); } let chain = entry.remove(); self.on_chain_removed(&id, chain.is_syncing(), sync_type); } } Entry::Vacant(entry) => { let peer_rpr = peer.to_string(); let new_chain = SyncingChain::new( start_epoch, target_head_slot, target_head_root, peer, beacon_processor_send.clone(), &self.log, ); debug_assert_eq!(new_chain.get_id(), id); debug!(self.log, "New chain added to sync"; "peer_id" => peer_rpr, "sync_type" => ?sync_type, &new_chain); entry.insert(new_chain); let _ = metrics::get_int_gauge(&metrics::SYNCING_CHAINS_COUNT, &[sync_type.as_str()]) .map(|m| m.inc()); } } } }
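The sort key in `update_head_chains` leans on Rust's lexicographic tuple ordering: `(available_peers, !is_syncing, id)` compares by peer count first, and among equal peer counts the already-syncing chain (`!is_syncing == false`) sorts earlier; the selection loop then walks that sorted order. A toy demonstration with made-up values:

```rust
fn main() {
    let mut preferred_ids = vec![
        (5usize, true, 12u64), // 5 peers, not syncing
        (3, false, 10),        // 3 peers, syncing
        (3, true, 11),         // 3 peers, not syncing
    ];
    preferred_ids.sort_unstable();
    // Ascending: fewer peers first; within a tie, `false` (syncing) first.
    assert_eq!(
        preferred_ids,
        vec![(3, false, 10), (3, true, 11), (5, true, 12)]
    );
}
```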
41.418787
129
0.550579
ffefe5d8fa2c5cbc1681e4d091b1a437c5c964b6
217
#![feature(test)]

#[cfg(test)]
mod tests {
    extern crate test;
    use test::Bencher;
    use learn_rust;

    #[bench]
    fn bench_add_two(b: &mut Bencher) {
        b.iter(|| learn_rust::add_two(2));
    }
}
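One caveat with a micro-benchmark this small: if the optimizer can constant-fold `add_two(2)`, the measured loop does no work. A guarded variant using `test::black_box` (a sketch; it assumes the same `learn_rust::add_two` as above):

```rust
#![feature(test)]

#[cfg(test)]
mod tests {
    extern crate test;
    use test::{black_box, Bencher};

    #[bench]
    fn bench_add_two_black_box(b: &mut Bencher) {
        // black_box hides the argument and the result from the optimizer,
        // so the call really runs on every iteration.
        b.iter(|| black_box(learn_rust::add_two(black_box(2))));
    }
}
```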
13.5625
42
0.562212
e91a63819d71771734a703df145c62dbce343e9b
1,162
/*
 * The `use` declaration
 *
 * The `use` declaration can be used to bind a full path to a new name, for
 * easier access. It is often used like this:
 *
 * ```
 * use crate::deeply::nested::{
 *     my_first_function,
 *     my_second_function,
 *     AndATraitType
 * };
 *
 * fn main() {
 *     my_first_function();
 * }
 * ```
 */

// You can use the `as` keyword to bind imports to a different name:

// Bind the `deeply::nested::function` path to `other_function`.
use deeply::nested::function as other_function;

fn function() {
    println!("called `function()`");
}

mod deeply {
    pub mod nested {
        pub fn function() {
            println!("called `deeply::nested::function()`");
        }
    }
}

fn main() {
    // Easier access to `deeply::nested::function`
    other_function();

    println!("Entering block");
    {
        // This is equivalent to `use deeply::nested::function as function`.
        // This `function()` will shadow the outer one.
        use crate::deeply::nested::function;

        // `use` bindings have a local scope. In this case, the shadowing of
        // `function()` is only in this block.
        function();

        println!("Leaving block");
    }

    function();
}
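A closely related tool is `pub use`, which re-exports an item so code outside the module also gets the shorter path; a minimal sketch:

```rust
mod deeply {
    pub mod nested {
        pub fn function() {
            println!("called `deeply::nested::function()`");
        }
    }
}

// Re-export: callers of this module see `function` at the top level.
pub use deeply::nested::function;

fn main() {
    function();
}
```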
20.75
75
0.623924
232f55ae5171c3a48157107c1c40c84928035845
6,003
use crate::errors::AppError; use crate::ConnType; use crate::models::schema::ionets; use crate::models::schema::ionets::dsl::{created_at, host_uuid, ionets as dsl_ionets}; use crate::models::{get_granularity, HttpPostHost}; use diesel::{ sql_types::{Int8, Text, Timestamp}, *, }; use serde::{Deserialize, Serialize}; /// DB Specific struct for ionets table #[derive(Identifiable, Queryable, Debug, Serialize, Deserialize)] #[table_name = "ionets"] pub struct IoNet { pub id: i64, pub interface: String, pub rx_bytes: i64, pub rx_packets: i64, pub rx_errs: i64, pub rx_drop: i64, pub tx_bytes: i64, pub tx_packets: i64, pub tx_errs: i64, pub tx_drop: i64, pub host_uuid: String, pub created_at: chrono::NaiveDateTime, } impl IoNet { /// Return a Vector of IoNet /// # Params /// * `conn` - The r2d2 connection needed to fetch the data from the db /// * `uuid` - The host's uuid we want to get IoNet of /// * `size` - The number of elements to fetch /// * `page` - How many items you want to skip (page * size) pub fn get_data( conn: &ConnType, uuid: &str, size: i64, page: i64, ) -> Result<Vec<Self>, AppError> { Ok(dsl_ionets .filter(host_uuid.eq(uuid)) .limit(size) .offset(page * size) .order_by(created_at.desc()) .load(conn)?) } /// Return a Vector of IoNet between min_date and max_date /// # Params /// * `conn` - The r2d2 connection needed to fetch the data from the db /// * `uuid` - The host's uuid we want to get IoNet of /// * `size` - The number of elements to fetch /// * `min_date` - Min timestamp for the data to be fetched /// * `max_date` - Max timestamp for the data to be fetched pub fn get_data_dated( conn: &ConnType, uuid: &str, min_date: chrono::NaiveDateTime, max_date: chrono::NaiveDateTime, ) -> Result<Vec<IoNetDTORaw>, AppError> { let size = (max_date - min_date).num_seconds(); let granularity = get_granularity(size); // Dummy require to ensure no issue if table name change. // If the table's name is to be changed, we have to change it from the sql_query below. { #[allow(unused_imports)] use crate::models::schema::ionets; } // Prepare and run the query Ok(sql_query(format!( " WITH s AS ( SELECT interface, avg(rx_bytes)::int8 as rx_bytes, avg(tx_bytes)::int8 as tx_bytes, time_bucket('{}s', created_at) as time FROM ionets WHERE host_uuid=$1 AND created_at BETWEEN $2 AND $3 GROUP BY time,interface ORDER BY time DESC ) SELECT interface, rx_bytes, tx_bytes, time as created_at FROM s", granularity )) .bind::<Text, _>(uuid) .bind::<Timestamp, _>(min_date) .bind::<Timestamp, _>(max_date) .load(conn)?) } /// Return the numbers of IoNet the host have /// # Params /// * `conn` - The r2d2 connection needed to fetch the data from the db /// * `uuid` - The host's uuid we want to get the number of IoNet of /// * `size` - The number of elements to fetch pub fn count(conn: &ConnType, uuid: &str, size: i64) -> Result<i64, AppError> { // Dummy require to ensure no issue if table name change. // If the table's name is to be changed, we have to change it from the sql_query below. 
{ #[allow(unused_imports)] use crate::models::schema::ionets; } let res = sql_query( " WITH s AS (SELECT id, interface, created_at FROM ionets WHERE host_uuid=$1 ORDER BY created_at DESC LIMIT $2 ) SELECT COUNT(DISTINCT interface) FROM s", ) .bind::<Text, _>(uuid) .bind::<Int8, _>(size) .load::<IoNetCount>(conn)?; if res.is_empty() { Ok(0) } else { Ok(res[0].count) } } } #[derive(Queryable, QueryableByName, Serialize)] #[table_name = "ionets"] pub struct IoNetDTORaw { pub interface: String, pub rx_bytes: i64, pub tx_bytes: i64, pub created_at: chrono::NaiveDateTime, } #[derive(Queryable, QueryableByName, Serialize)] pub struct IoNetCount { #[sql_type = "Int8"] pub count: i64, } // ================ // Insertable model // ================ #[derive(Insertable)] #[table_name = "ionets"] pub struct IoNetDTO<'a> { pub interface: &'a str, pub rx_bytes: i64, pub rx_packets: i64, pub rx_errs: i64, pub rx_drop: i64, pub tx_bytes: i64, pub tx_packets: i64, pub tx_errs: i64, pub tx_drop: i64, pub host_uuid: &'a str, pub created_at: chrono::NaiveDateTime, } impl<'a> IoNetDTO<'a> { pub fn cfrom(item: &'a HttpPostHost, huuid: &'a str) -> Option<Vec<IoNetDTO<'a>>> { let ionets = item.ionets.as_ref()?; let mut list = Vec::with_capacity(ionets.len()); for iocounter in ionets { list.push(Self { interface: &iocounter.interface, rx_bytes: iocounter.rx_bytes, rx_packets: iocounter.rx_packets, rx_errs: iocounter.rx_errs, rx_drop: iocounter.rx_drop, tx_bytes: iocounter.tx_bytes, tx_packets: iocounter.tx_packets, tx_errs: iocounter.tx_errs, tx_drop: iocounter.tx_drop, host_uuid: huuid, created_at: item.created_at, }) } Some(list) } }
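A sketch of calling the paged getter through a connection pool. This assumes `ConnType` aliases a pooled diesel Postgres connection; the pool setup shown is illustrative, not taken from this crate:

```rust
use diesel::r2d2::{ConnectionManager, Pool, PooledConnection};
use diesel::PgConnection;

// Assumed alias; the crate's real ConnType may differ.
type PgPool = Pool<ConnectionManager<PgConnection>>;

fn latest_ionets(pool: &PgPool, uuid: &str) -> Result<Vec<IoNet>, AppError> {
    let conn: PooledConnection<ConnectionManager<PgConnection>> =
        pool.get().expect("no connection available");
    // First page (page 0) of 50 rows, newest first per get_data's ORDER BY.
    IoNet::get_data(&conn, uuid, 50, 0)
}
```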
30.472081
95
0.550058
f92fae6d63763d6b1cbb4afb0d577deec850813d
3,104
//! # twilight-model //! //! [![codecov badge][]][codecov link] [![discord badge][]][discord link] [![github badge][]][github link] [![license badge][]][license link] ![rust badge] //! //! See the [`twilight`] documentation for more information. //! //! `twilight-model` is a crate of serde models defining the Discord APIs with //! few convenience methods on top of them. //! //! These are in a single crate for ease of use, a single point of definition, //! and a sort of versioning of the Discord API. Similar to how a database //! schema progresses in versions, the definition of the API also progresses in //! versions. //! //! The types in this crate are reproducible: deserializing a payload into a //! type, serializing it, and then deserializing it again will work. //! //! Defined are a number of modules defining types returned by or owned by //! resource categories. For example, `gateway` are types used to interact with //! and returned by the gateway API. `guild` contains types owned by the Guild //! resource category. These types may be directly returned by, built on top of, //! or extended by other crates. //! //! Some models have associated builders, which can be found in the //! [`twilight-util`] crate. //! //! ## Features //! //! ### Tracing //! //! The `tracing` feature enables logging via the [`tracing`] crate. //! //! This is enabled by default. //! //! ## License //! //! [ISC][LICENSE.md] //! //! [LICENSE.md]: https://github.com/twilight-rs/twilight/blob/main/LICENSE.md //! [`tracing`]: https://crates.io/crates/tracing //! [`twilight-util`]: https://docs.rs/twilight-util //! [`twilight`]: https://docs.rs/twilight //! [codecov badge]: https://img.shields.io/codecov/c/gh/twilight-rs/twilight?logo=codecov&style=for-the-badge&token=E9ERLJL0L2 //! [codecov link]: https://app.codecov.io/gh/twilight-rs/twilight/ //! [discord badge]: https://img.shields.io/discord/745809834183753828?color=%237289DA&label=discord%20server&logo=discord&style=for-the-badge //! [discord link]: https://discord.gg/7jj8n7D //! [github badge]: https://img.shields.io/badge/github-twilight-6f42c1.svg?style=for-the-badge&logo=github //! [github link]: https://github.com/twilight-rs/twilight //! [license badge]: https://img.shields.io/badge/license-ISC-blue.svg?style=for-the-badge&logo=pastebin //! [license link]: https://github.com/twilight-rs/twilight/blob/main/LICENSE.md //! [rust badge]: https://img.shields.io/badge/rust-1.57+-93450a.svg?style=for-the-badge&logo=rust #![deny( clippy::all, clippy::missing_const_for_fn, clippy::pedantic, future_incompatible, nonstandard_style, rust_2018_idioms, rustdoc::broken_intra_doc_links, unsafe_code, unused, warnings )] #![allow( clippy::module_name_repetitions, clippy::must_use_candidate, clippy::semicolon_if_nothing_returned )] pub mod application; pub mod channel; pub mod datetime; pub mod gateway; pub mod guild; pub mod id; pub mod invite; pub mod oauth; pub mod scheduled_event; pub mod template; pub mod user; pub mod util; pub mod voice; mod visitor; #[cfg(test)] mod test;
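The "reproducible" property described above is just a serde round-trip; a sketch with `serde_json` on a stand-in struct (`Payload` is illustrative, not a twilight type):

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, Deserialize, PartialEq, Serialize)]
struct Payload {
    id: u64,
    name: String,
}

fn main() -> Result<(), serde_json::Error> {
    let input = r#"{"id":1,"name":"twilight"}"#;
    let first: Payload = serde_json::from_str(input)?;
    // Serialize and deserialize again: the value survives unchanged.
    let second: Payload = serde_json::from_str(&serde_json::to_string(&first)?)?;
    assert_eq!(first, second);
    Ok(())
}
```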
34.876404
155
0.709729
4adcb4aed918e29650a6c0ac1d46374bbeb386ec
6,003
use imgui::*; mod support; fn main() { let system = support::init(file!()); let mut buffers = vec![String::default(), String::default(), String::default()]; system.main_loop(move |_, ui| { Window::new("Input text callbacks") .size([500.0, 300.0], Condition::FirstUseEver) .build(ui, || { ui.text("You can make a variety of buffer callbacks on an Input Text"); ui.text( "or on an InputTextMultiline. In this example, we'll use \ InputText primarily.", ); ui.text( "The only difference is that InputTextMultiline doesn't get \ the `History` callback,", ); ui.text("since, of course, you need the up/down keys to navigate."); ui.separator(); ui.text("No callbacks:"); ui.input_text("buf0", &mut buffers[0]).build(); ui.input_text("buf1", &mut buffers[1]).build(); ui.input_text("buf2", &mut buffers[2]).build(); ui.separator(); ui.text("Here's a callback which printlns when each is ran."); struct AllCallback; impl InputTextCallbackHandler for AllCallback { fn char_filter(&mut self, c: char) -> Option<char> { println!("Char filter fired! This means a char was inputted."); Some(c) } fn on_completion(&mut self, _: TextCallbackData<'_>) { println!("Completion request fired! This means the tab key was hit."); } fn on_edit(&mut self, _: TextCallbackData<'_>) { println!("Edit was fired! Any edit will cause this to fire.") } fn on_history(&mut self, dir: HistoryDirection, _: TextCallbackData<'_>) { println!("History was fired by pressing {:?}", dir); } fn on_always(&mut self, _: TextCallbackData<'_>) { // We don't actually print this out because it will flood your log a lot! // println!("The always callback fired! It always fires."); } } ui.input_text("All Callbacks logging", buffers.get_mut(0).unwrap()) .callback(InputTextCallback::all(), AllCallback) .build(); ui.separator(); ui.text("You can also define a callback on structs with data."); ui.text("Here we implement the callback handler on a wrapper around &mut String"); ui.text("to duplicate edits to buf0 on buf1"); struct Wrapper<'a>(&'a mut String); impl<'a> InputTextCallbackHandler for Wrapper<'a> { fn on_always(&mut self, data: TextCallbackData<'_>) { *self.0 = data.str().to_owned(); } } let (buf0, brwchk_dance) = buffers.split_first_mut().unwrap(); let buf1 = Wrapper(&mut brwchk_dance[0]); ui.input_text("Edits copied to buf1", buf0) .callback(InputTextCallback::ALWAYS, buf1) .build(); ui.separator(); ui.text("Finally, we'll do some whacky history to show inserting and removing"); ui.text("characters from the buffer."); ui.text( "Here, pressing UP (while editing the below widget) will remove the\n\ first and last character from buf2", ); ui.text("and pressing DOWN will prepend the first char from buf0 AND"); ui.text("append the last char from buf1"); let (buf0, brwchk_dance) = buffers.split_first_mut().unwrap(); let (buf1, buf2_dance) = brwchk_dance.split_first_mut().unwrap(); let buf2 = &mut buf2_dance[0]; struct Wrapper2<'a>(&'a str, &'a str); impl<'a> InputTextCallbackHandler for Wrapper2<'a> { fn on_history( &mut self, dir: HistoryDirection, mut data: TextCallbackData<'_>, ) { match dir { HistoryDirection::Up => { // remove first char... if !data.str().is_empty() { data.remove_chars(0, 1); if let Some((idx, _)) = data.str().char_indices().rev().next() { data.remove_chars(idx, 1); } } } HistoryDirection::Down => { // insert first char... 
if let Some(first_char) = self.0.get(0..1) { data.insert_chars(0, first_char); } // insert last char if let Some((idx, _)) = self.1.char_indices().rev().next() { data.push_str(&self.1[idx..]); } } } } } ui.input_text("Wild buf2 editor", buf2) .callback(InputTextCallback::HISTORY, Wrapper2(buf0, buf1)) .build(); ui.text( "For more examples on how to use callbacks non-chaotically, check the demo", ); }); }); }
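For completeness, the `char_filter` hook alone can rewrite or reject keystrokes. A drop-in handler sketch using `InputTextCallbackHandler` exactly as declared in this example (the `CHAR_FILTER` flag name in the trailing comment is my assumption about the flag set):

```rust
// Uppercase everything typed and swallow ASCII digits entirely.
struct Shouty;

impl InputTextCallbackHandler for Shouty {
    fn char_filter(&mut self, c: char) -> Option<char> {
        if c.is_ascii_digit() {
            None // returning None rejects the keystroke
        } else {
            c.to_uppercase().next() // substitute the uppercased character
        }
    }
}

// Wired up like the handlers above:
// ui.input_text("shouty", &mut buf)
//     .callback(InputTextCallback::CHAR_FILTER, Shouty)
//     .build();
```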
41.979021
100
0.441946
e67e035d1de5302e10eb99d253c2cc97c530d81c
1,355
use crate::{c_component::CModel, ARoute, AppRoute};
use yew::{prelude::*, virtual_dom::VNode, Properties};
use yew_router::{prelude::*, switch::AllowMissing};

pub struct AModel {
    props: Props,
}

#[derive(Clone, PartialEq, Properties)]
pub struct Props {
    pub route: Option<ARoute>,
}

pub enum Msg {}

impl Component for AModel {
    type Message = Msg;
    type Properties = Props;

    fn create(props: Self::Properties, _link: ComponentLink<Self>) -> Self {
        AModel { props }
    }

    fn update(&mut self, _msg: Self::Message) -> ShouldRender {
        true
    }

    fn change(&mut self, props: Self::Properties) -> ShouldRender {
        self.props = props;
        true
    }

    fn view(&self) -> VNode {
        html! {
            <div>
                { "I am the A component" }
                <div>
                    <RouterButton<AppRoute> route=AppRoute::A(AllowMissing(Some(ARoute))) />
                    // {"Go to a/c"}
                    // </RouterButton<AppRoute>>
                </div>
                <div>
                    {
                        match self.props.route {
                            Some(_) => html!{<CModel/>},
                            None => html!{}
                        }
                    }
                </div>
            </div>
        }
    }
}
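For orientation, a parent would render this component with the optional nested route passed as the `route` prop; a sketch in the same old-style `html!` prop syntax used above (the parent's placement is assumed):

```rust
// Inside some parent component's view(); Some(ARoute) selects the nested
// CModel branch in AModel::view above.
html! {
    <AModel route=Some(ARoute) />
}
```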
24.196429
76
0.458303
e9ef74571e6259e5da98dcd05a7425bb7c5ebd2d
6,359
#![doc = "generated by AutoRust"] #![allow(non_camel_case_types)] #![allow(unused_imports)] use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct AvailableOperations { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<OperationDetail>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } impl AvailableOperations { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct DiagnosticsInfoDto { #[serde(rename = "ms-cv", default, skip_serializing_if = "Option::is_none")] pub ms_cv: Option<String>, #[serde(rename = "buildNumber", default, skip_serializing_if = "Option::is_none")] pub build_number: Option<String>, } impl DiagnosticsInfoDto { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct ErrorResponse { #[serde(default, skip_serializing_if = "Option::is_none")] pub error: Option<error_response::Error>, } impl ErrorResponse { pub fn new() -> Self { Self::default() } } pub mod error_response { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct Error { #[serde(default, skip_serializing_if = "Option::is_none")] pub code: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option<String>, } impl Error { pub fn new() -> Self { Self::default() } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct Notification { #[serde(flatten)] pub resource: Resource, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<OfferProperties>, } impl Notification { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct NotificationList { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<Notification>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } impl NotificationList { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct OfferProperties { #[serde(rename = "offerId", default, skip_serializing_if = "Option::is_none")] pub offer_id: Option<String>, #[serde(rename = "createdDate", default, skip_serializing_if = "Option::is_none")] pub created_date: Option<String>, #[serde(rename = "offerDisplayName", default, skip_serializing_if = "Option::is_none")] pub offer_display_name: Option<String>, #[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")] pub principal_id: Option<String>, } impl OfferProperties { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct OperationDetail { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "isDataAction", default, skip_serializing_if = "Option::is_none")] pub is_data_action: Option<bool>, #[serde(default, skip_serializing_if = "Option::is_none")] pub display: Option<OperationDisplay>, #[serde(default, skip_serializing_if = "Option::is_none")] pub origin: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, } impl OperationDetail { pub 
fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct OperationDisplay { #[serde(default, skip_serializing_if = "Option::is_none")] pub resource: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub provider: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub operation: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, } impl OperationDisplay { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct Resource { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, } impl Resource { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct SystemData { #[serde(rename = "createdBy", default, skip_serializing_if = "Option::is_none")] pub created_by: Option<String>, #[serde(rename = "createdByType", default, skip_serializing_if = "Option::is_none")] pub created_by_type: Option<system_data::CreatedByType>, #[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")] pub created_at: Option<String>, #[serde(rename = "lastModifiedBy", default, skip_serializing_if = "Option::is_none")] pub last_modified_by: Option<String>, #[serde(rename = "lastModifiedByType", default, skip_serializing_if = "Option::is_none")] pub last_modified_by_type: Option<system_data::LastModifiedByType>, #[serde(rename = "lastModifiedAt", default, skip_serializing_if = "Option::is_none")] pub last_modified_at: Option<String>, } impl SystemData { pub fn new() -> Self { Self::default() } } pub mod system_data { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum CreatedByType { User, Application, ManagedIdentity, Key, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum LastModifiedByType { User, Application, ManagedIdentity, Key, } }
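Since these generated models are plain serde types, constructing and serializing one is ordinary `serde_json` usage; a sketch assuming it sits beside the module above (the values are illustrative):

```rust
fn main() -> Result<(), serde_json::Error> {
    let mut data = SystemData::default();
    data.created_by = Some("alice@example.com".to_owned());
    data.created_by_type = Some(system_data::CreatedByType::User);
    // skip_serializing_if drops every field still set to None.
    let json = serde_json::to_string(&data)?;
    assert_eq!(
        json,
        r#"{"createdBy":"alice@example.com","createdByType":"User"}"#
    );
    Ok(())
}
```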
35.132597
93
0.67086
ef7308a3c74e07cb83933e23fce1ec5817ff1113
2,462
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::S3ndtr { #[doc = r" Modifies the contents of the register"] #[inline(always)] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline(always)] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline(always)] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } } #[doc = r" Value of the field"] pub struct NdtR { bits: u16, } impl NdtR { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bits(&self) -> u16 { self.bits } } #[doc = r" Proxy"] pub struct _NdtW<'a> { w: &'a mut W, } impl<'a> _NdtW<'a> { #[doc = r" Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, bits: u16) -> &'a mut W { const MASK: u16 = 65535; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline(always)] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 0:15 - Number of data items to transfer"] #[inline(always)] pub fn ndt(&self) -> NdtR { let bits = { const MASK: u16 = 65535; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u16 }; NdtR { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline(always)] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:15 - Number of data items to transfer"] #[inline(always)] pub fn ndt(&mut self) -> _NdtW { _NdtW { w: self } } }
24.376238
59
0.500406
fcb3fe2ba09367ba0daae665cccaa75c1d065243
7,725
use crate::action::StoredAction; use anyhow::{anyhow, Result}; use rand::{thread_rng, Rng}; use serde_json::{json, Value}; use std::convert::TryInto; pub const SYNC_API_VERSION: u64 = 1; const SYNC_KEY_CHARS: [char; 32] = [ '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', ]; const SYNC_KEY_LENGTH: usize = 25; const SYNC_KEY_GROUPING: usize = 5; const SYNC_KEY_VALIDATION_BITS: usize = 15; #[derive(Clone, Debug)] pub struct SyncRequest { pub sync_key: String, pub sync_id: u32, pub upload: Option<Vec<StoredAction>>, } #[derive(Clone, Debug)] pub struct SyncResponse { pub new_sync_id: u32, pub new_actions: Vec<StoredAction>, pub more_actions: bool, pub uploaded: usize, } impl SyncRequest { pub fn new_sync_key() -> String { // Generate a random sync identifier let id_range = (SYNC_KEY_CHARS.len() as u128).pow(SYNC_KEY_LENGTH as u32) >> SYNC_KEY_VALIDATION_BITS; let id: u128 = thread_rng().gen_range(0..id_range); // Compute checksum digits for validation and combine into the sync key value let checksum = Self::sync_key_checksum(id); let mut value = (id << SYNC_KEY_VALIDATION_BITS) + checksum; // Generate sync key characters from its integer value (base 32) let mut chars = Vec::new(); for _ in 0..SYNC_KEY_LENGTH { chars.push(SYNC_KEY_CHARS[(value % SYNC_KEY_CHARS.len() as u128) as usize]); value /= SYNC_KEY_CHARS.len() as u128; } chars.reverse(); // Split characters into groups for visual aid let groups: Vec<String> = chars .as_slice() .chunks(SYNC_KEY_GROUPING) .map(|chunk| chunk.iter().collect()) .collect(); groups.join("-") } pub fn validate_sync_key(key: &str) -> Option<String> { // Unify sync key format by getting rid of whitespace and dashes, replacing commonly // mistaken characters, and forcing uppercase let key = key.trim().to_uppercase().replace("-", "").replace("I", "1"); if key.len() != SYNC_KEY_LENGTH { return None; } // Compute integer value from sync key characters (base 32) let mut value: u128 = 0; for ch in key.chars() { match SYNC_KEY_CHARS.binary_search(&ch) { Ok(idx) => value = value * SYNC_KEY_CHARS.len() as u128 + idx as u128, _ => return None, }; } // Split sync key value into sync key identifier and checksum let id = value >> SYNC_KEY_VALIDATION_BITS; let checksum = value & ((1 << SYNC_KEY_VALIDATION_BITS) - 1); // Validate checksum if checksum != Self::sync_key_checksum(id) { return None; } let chars: Vec<char> = key.chars().collect(); let groups: Vec<String> = chars .as_slice() .chunks(SYNC_KEY_GROUPING) .map(|chunk| chunk.iter().collect()) .collect(); Some(groups.join("-")) } fn sync_key_checksum(id: u128) -> u128 { // Hash the bytes of the identifier with a 32 bit hash and return // the bottom bits as the validation checksum let mut value = id; let mut checksum: u32 = 0; for _ in 0..(128 / 8) { checksum = checksum.wrapping_add((value & 0xff) as u32); checksum = checksum.wrapping_add(checksum.wrapping_shl(10)); checksum ^= checksum >> 6; value >>= 8; } checksum = checksum.wrapping_add(checksum.wrapping_shl(3)); checksum ^= checksum >> 11; checksum = checksum.wrapping_add(checksum.wrapping_shl(15)); (checksum & ((1 << SYNC_KEY_VALIDATION_BITS) - 1)) as u128 } pub fn fetch(sync_key: String, sync_id: u32) -> Self { Self { sync_key, sync_id, upload: None, } } pub fn upload(sync_key: String, sync_id: u32, actions: Vec<StoredAction>) -> Self { Self { sync_key, sync_id, upload: Some(actions), } } pub fn serialize(&self) -> Result<Value> { Ok(match &self.upload { 
Some(upload) => { let upload = base64::encode(StoredAction::serialize_list(upload)); json!({ "api_version": SYNC_API_VERSION, "sync_key": self.sync_key, "sync_id": self.sync_id, "upload": upload }) } None => { json!({ "api_version": SYNC_API_VERSION, "sync_key": self.sync_key, "sync_id": self.sync_id }) } }) } pub fn deserialize(request: Value) -> Result<Self> { let sync_key = Self::validate_sync_key( request .get("sync_key") .ok_or_else(|| anyhow!("Missing sync key"))? .as_str() .ok_or_else(|| anyhow!("Sync key is not a string"))?, ) .ok_or_else(|| anyhow!("Invalid sync key"))?; let sync_id: u32 = request .get("sync_id") .ok_or_else(|| anyhow!("Missing sync ID"))? .as_u64() .ok_or_else(|| anyhow!("Sync ID is not an integer"))? .try_into()?; let upload = match request.get("upload") { Some(data) => Some(StoredAction::deserialize_list(&base64::decode( data.as_str() .ok_or_else(|| anyhow!("Upload data is not a base64 string"))?, )?)?), None => None, }; Ok(Self { sync_key, sync_id, upload, }) } } impl SyncResponse { pub fn serialize(&self) -> Result<Value> { if self.new_actions.len() == 0 { Ok(json!({ "sync_id": self.new_sync_id, "uploaded": self.uploaded })) } else { let new_data = base64::encode(StoredAction::serialize_list(&self.new_actions)); Ok(json!({ "sync_id": self.new_sync_id, "data": new_data, "more": self.more_actions, "uploaded": self.uploaded })) } } pub fn deserialize(response: Value) -> Result<Self> { let new_sync_id: u32 = response .get("sync_id") .ok_or_else(|| anyhow!("Missing sync ID"))? .as_u64() .ok_or_else(|| anyhow!("Sync ID is not an integer"))? .try_into()?; let new_actions = match response.get("data") { Some(data) => StoredAction::deserialize_list(&base64::decode( data.as_str() .ok_or_else(|| anyhow!("Data is not a base64 string"))?, )?)?, None => Vec::new(), }; let more_actions = match response.get("more") { Some(more) => more .as_bool() .ok_or_else(|| anyhow!("More actions flag is not a bool"))?, None => false, }; let uploaded = response .get("uploaded") .ok_or_else(|| anyhow!("Missing upload count"))? .as_u64() .ok_or_else(|| anyhow!("Upload count is not an integer"))? .try_into()?; Ok(Self { new_sync_id, new_actions, more_actions, uploaded, }) } }
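Round-tripping the key helpers above (a sketch assuming it sits beside this impl): a freshly generated key always validates, and validation canonicalizes sloppy input back to the grouped, uppercase form.

```rust
fn main() {
    let key = SyncRequest::new_sync_key();
    assert_eq!(SyncRequest::validate_sync_key(&key), Some(key.clone()));

    // Lowercased input with the dashes stripped still validates, and comes
    // back in the canonical grouped form.
    let sloppy = key.replace('-', "").to_lowercase();
    assert_eq!(SyncRequest::validate_sync_key(&sloppy), Some(key));
}
```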
32.87234
99
0.517411
0ec507b3c5b255578e7e9d7c0ee5f842f28c5f55
17,563
use std::io::BufRead; enum BootstrapReadiness { AlreadyBootstrapped, NeedBootstrapAndApprove, NeedManualCleanup, } const EXIT_CODE_TESTS_PASSED: i32 = 0; const EXIT_CODE_TESTS_FAILED: i32 = 1; const EXIT_CODE_NEED_MANUAL_CLEANUP: i32 = 2; const EXIT_CODE_BUILD_FAILED: i32 = 3; fn main() { println!("Ensuring all binaries are built..."); let build_result = build_workspace(); match build_result { Ok(()) => { println!("Build succeeded"); } Err(e) => { eprintln!("{}", e); eprintln!("Build FAILED"); std::process::exit(EXIT_CODE_BUILD_FAILED); } } println!("Preparing for bootstrap..."); let readiness = prepare_for_bootstrap(); match readiness { BootstrapReadiness::AlreadyBootstrapped => { println!("Already bootstrapped"); } BootstrapReadiness::NeedBootstrapAndApprove => { println!("Bootstrap required"); } BootstrapReadiness::NeedManualCleanup => { eprintln!("Bootstrap directory and CSRs need manual clean up"); std::process::exit(EXIT_CODE_NEED_MANUAL_CLEANUP); } } if matches!(readiness, BootstrapReadiness::NeedBootstrapAndApprove) { println!("Running bootstrap script..."); let bootstrap_result = run_bootstrap(); match bootstrap_result { Ok(()) => { println!("Bootstrap script succeeded"); } Err(e) => { eprintln!("Running bootstrap script failed: {}", e); std::process::exit(EXIT_CODE_NEED_MANUAL_CLEANUP); } } } let test_result = run_tests(readiness); println!("All complete"); let exit_code = match test_result { Ok(()) => EXIT_CODE_TESTS_PASSED, Err(_) => EXIT_CODE_TESTS_FAILED, }; std::process::exit(exit_code); } fn config_dir() -> std::path::PathBuf { let home_dir = dirs::home_dir().expect("Can't get home dir"); // TODO: allow override of config dir home_dir.join(".krustlet/config") } fn config_file_path_str(file_name: impl AsRef<std::path::Path>) -> String { config_dir().join(file_name).to_str().unwrap().to_owned() } fn build_workspace() -> anyhow::Result<()> { let build_result = std::process::Command::new("cargo") .args(&["build"]) .output()?; if build_result.status.success() { Ok(()) } else { Err(anyhow::anyhow!( "{}", String::from_utf8(build_result.stderr).unwrap() )) } } fn prepare_for_bootstrap() -> BootstrapReadiness { let host_name = hostname::get() .expect("Can't get host name") .into_string() .expect("Can't get host name"); let cert_paths: Vec<_> = vec![ "krustlet-wasi.crt", "krustlet-wasi.key", "krustlet-wascc.crt", "krustlet-wascc.key", ] .iter() .map(|f| config_dir().join(f)) .collect(); let status = all_or_none(cert_paths); match status { AllOrNone::AllExist => { return BootstrapReadiness::AlreadyBootstrapped; } AllOrNone::NoneExist => (), AllOrNone::Error => { return BootstrapReadiness::NeedManualCleanup; } }; // We are not bootstrapped, but there may be existing CSRs around // TODO: allow override of host names let wasi_host_name = &host_name; let wascc_host_name = &host_name; let wasi_cert_name = format!("{}-tls", wasi_host_name); let wascc_cert_name = format!("{}-tls", wascc_host_name); let csr_spawn_deletes: Vec<_> = vec![ "krustlet-wasi", "krustlet-wascc", &wasi_cert_name, &wascc_cert_name, ] .iter() .map(delete_csr) .collect(); let (csr_deletions, csr_spawn_delete_errors) = csr_spawn_deletes.partition_success(); if !csr_spawn_delete_errors.is_empty() { return BootstrapReadiness::NeedManualCleanup; } let csr_deletion_results: Vec<_> = csr_deletions .into_iter() .map(|c| c.wait_with_output()) .collect(); let (csr_deletion_outputs, csr_run_deletion_failures) = csr_deletion_results.partition_success(); if !csr_run_deletion_failures.is_empty() { return 
BootstrapReadiness::NeedManualCleanup; } if csr_deletion_outputs.iter().any(|o| !is_resource_gone(o)) { return BootstrapReadiness::NeedManualCleanup; } // We have now deleted all the local certificate files, and all the CSRs that // might get in the way of our re-bootstrapping. Let the caller know they // will need to re-approve once the new CSRs come up. BootstrapReadiness::NeedBootstrapAndApprove } enum AllOrNone { AllExist, NoneExist, Error, } fn all_or_none(files: Vec<std::path::PathBuf>) -> AllOrNone { let (exist, missing): (Vec<_>, Vec<_>) = files.iter().partition(|f| f.exists()); if missing.is_empty() { return AllOrNone::AllExist; } for f in exist { if matches!(std::fs::remove_file(f), Err(_)) { return AllOrNone::Error; } } AllOrNone::NoneExist } fn delete_csr(csr_name: impl AsRef<str>) -> std::io::Result<std::process::Child> { std::process::Command::new("kubectl") .args(&["delete", "csr", csr_name.as_ref()]) .stderr(std::process::Stdio::piped()) .stdout(std::process::Stdio::piped()) .spawn() } trait ResultSequence { type SuccessItem; type FailureItem; fn partition_success(self) -> (Vec<Self::SuccessItem>, Vec<Self::FailureItem>); } impl<T, E: std::fmt::Debug> ResultSequence for Vec<Result<T, E>> { type SuccessItem = T; type FailureItem = E; fn partition_success(self) -> (Vec<Self::SuccessItem>, Vec<Self::FailureItem>) { let (success_results, error_results): (Vec<_>, Vec<_>) = self.into_iter().partition(|r| r.is_ok()); let success_values = success_results.into_iter().map(|r| r.unwrap()).collect(); let error_values = error_results .into_iter() .map(|r| r.err().unwrap()) .collect(); (success_values, error_values) } } fn is_resource_gone(kubectl_output: &std::process::Output) -> bool { kubectl_output.status.success() || match String::from_utf8(kubectl_output.stderr.clone()) { Ok(s) => s.contains("NotFound"), _ => false, } } fn run_bootstrap() -> anyhow::Result<()> { let (shell, ext) = match std::env::consts::OS { "windows" => Ok(("powershell.exe", "ps1")), "linux" | "macos" => Ok(("bash", "sh")), os => Err(anyhow::anyhow!("Unsupported OS {}", os)), }?; let repo_root = std::env!("CARGO_MANIFEST_DIR"); let bootstrap_script = format!("{}/docs/howto/assets/bootstrap.{}", repo_root, ext); let bootstrap_output = std::process::Command::new(shell) .arg(bootstrap_script) .env("CONFIG_DIR", config_dir()) .stdout(std::process::Stdio::piped()) .stderr(std::process::Stdio::piped()) .output()?; match bootstrap_output.status.code() { Some(0) => Ok(()), Some(e) => Err(anyhow::anyhow!( "Bootstrap error {}: {}", e, String::from_utf8_lossy(&bootstrap_output.stderr) )), None => Err(anyhow::anyhow!( "Bootstrap error (no exit code): {}", String::from_utf8_lossy(&bootstrap_output.stderr) )), } } fn launch_kubelet( name: &str, kubeconfig_suffix: &str, kubelet_port: i32, need_csr: bool, ) -> anyhow::Result<OwnedChildProcess> { // run the kubelet as a background process using the // same cmd line as in the justfile: // KUBECONFIG=$(eval echo $CONFIG_DIR)/kubeconfig-wasi cargo run --bin krustlet-wasi {{FLAGS}} -- --node-name krustlet-wasi --port 3001 --bootstrap-file $(eval echo $CONFIG_DIR)/bootstrap.conf --cert-file $(eval echo $CONFIG_DIR)/krustlet-wasi.crt --private-key-file $(eval echo $CONFIG_DIR)/krustlet-wasi.key let bootstrap_conf = config_file_path_str("bootstrap.conf"); let cert = config_file_path_str(format!("{}.crt", name)); let private_key = config_file_path_str(format!("{}.key", name)); let kubeconfig = config_file_path_str(format!("kubeconfig-{}", kubeconfig_suffix)); let port_arg = format!("{}", 
kubelet_port); let repo_root = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")); let bin_path = repo_root.join("target/debug").join(name); let mut launch_kubelet_process = std::process::Command::new(bin_path) .args(&[ "--node-name", name, "--port", &port_arg, "--bootstrap-file", &bootstrap_conf, "--cert-file", &cert, "--private-key-file", &private_key, "--x-allow-local-modules", "true", ]) .env("KUBECONFIG", kubeconfig) .env( "RUST_LOG", "wascc_host=debug,wascc_provider=debug,wasi_provider=debug,main=debug", ) .stdout(std::process::Stdio::piped()) .stderr(std::process::Stdio::piped()) .spawn()?; println!("Kubelet process {} launched", name); if need_csr { println!("Waiting for kubelet {} to generate CSR", name); let stdout = launch_kubelet_process.stdout.as_mut().unwrap(); wait_for_tls_certificate_approval(stdout)?; println!("Finished bootstrapping for kubelet {}", name); } let terminator = OwnedChildProcess { terminated: false, child: launch_kubelet_process, }; Ok(terminator) } fn wait_for_tls_certificate_approval(stdout: impl std::io::Read) -> anyhow::Result<()> { let reader = std::io::BufReader::new(stdout); for (_, line) in reader.lines().enumerate() { match line { Ok(line_text) => { println!("Kubelet printed: {}", line_text); if line_text == "BOOTSTRAP: received TLS certificate approval: continuing" { return Ok(()); } let re = regex::Regex::new(r"^BOOTSTRAP: TLS certificate requires manual approval. Run kubectl certificate approve (\S+)$").unwrap(); match re.captures(&line_text) { None => (), Some(captures) => { let csr_name = &captures[1]; approve_csr(csr_name)? } } } Err(e) => eprintln!("Error reading kubelet stdout: {}", e), } } println!("End of kubelet output with no approval"); Err(anyhow::anyhow!("End of kubelet output with no approval")) } fn approve_csr(csr_name: &str) -> anyhow::Result<()> { println!("Approving CSR {}", csr_name); let approve_process = std::process::Command::new("kubectl") .args(&["certificate", "approve", csr_name]) .stderr(std::process::Stdio::piped()) .stdout(std::process::Stdio::piped()) .output()?; if !approve_process.status.success() { Err(anyhow::anyhow!( "Error approving CSR {}: {}", csr_name, String::from_utf8(approve_process.stderr).unwrap() )) } else { println!("Approved CSR {}", csr_name); clean_up_csr(csr_name) } } fn clean_up_csr(csr_name: &str) -> anyhow::Result<()> { println!("Cleaning up approved CSR {}", csr_name); let clean_up_process = std::process::Command::new("kubectl") .args(&["delete", "csr", csr_name]) .stderr(std::process::Stdio::piped()) .stdout(std::process::Stdio::piped()) .output()?; if !clean_up_process.status.success() { Err(anyhow::anyhow!( "Error cleaning up CSR {}: {}", csr_name, String::from_utf8(clean_up_process.stderr).unwrap() )) } else { println!("Cleaned up approved CSR {}", csr_name); Ok(()) } } struct OwnedChildProcess { terminated: bool, child: std::process::Child, } impl OwnedChildProcess { fn terminate(&mut self) -> anyhow::Result<()> { match self.child.kill().and_then(|_| self.child.wait()) { Ok(_) => { self.terminated = true; Ok(()) } Err(e) => Err(anyhow::anyhow!( "Failed to terminate spawned kubelet process: {}", e )), } } fn exited(&mut self) -> anyhow::Result<bool> { let exit_status = self.child.try_wait()?; Ok(exit_status.is_some()) } } impl Drop for OwnedChildProcess { fn drop(&mut self) { if !self.terminated { match self.terminate() { Ok(()) => (), Err(e) => eprintln!("{}", e), } } } } fn run_tests(readiness: BootstrapReadiness) -> anyhow::Result<()> { let wasi_process_result = launch_kubelet( 
"krustlet-wasi", "wasi", 3001, matches!(readiness, BootstrapReadiness::NeedBootstrapAndApprove), ); let wascc_process_result = launch_kubelet( "krustlet-wascc", "wascc", 3000, matches!(readiness, BootstrapReadiness::NeedBootstrapAndApprove), ); for process in &[&wasi_process_result, &wascc_process_result] { match process { Err(e) => { eprintln!("Error running kubelet process: {}", e); return Err(anyhow::anyhow!("Error running kubelet process: {}", e)); } Ok(_) => println!("Running kubelet process"), } } let test_result = run_test_suite(); let mut wasi_process = wasi_process_result.unwrap(); let mut wascc_process = wascc_process_result.unwrap(); if matches!(test_result, Err(_)) { warn_if_premature_exit(&mut wasi_process, "krustlet-wasi"); warn_if_premature_exit(&mut wascc_process, "krustlet-wascc"); // TODO: ideally we shouldn't have to wait for termination before getting logs let terminate_result = wasi_process .terminate() .and_then(|_| wascc_process.terminate()); match terminate_result { Ok(_) => { let wasi_log_destination = std::path::PathBuf::from("./krustlet-wasi-e2e"); capture_kubelet_logs( "krustlet-wasi", &mut wasi_process.child, wasi_log_destination, ); let wascc_log_destination = std::path::PathBuf::from("./krustlet-wascc-e2e"); capture_kubelet_logs( "krustlet-wascc", &mut wascc_process.child, wascc_log_destination, ); } Err(e) => { eprintln!("{}", e); eprintln!("Can't capture kubelet logs as they didn't terminate"); } } } test_result } fn warn_if_premature_exit(process: &mut OwnedChildProcess, name: &str) { match process.exited() { Err(e) => eprintln!( "FAILED checking kubelet process {} exit state ({})", name, e ), Ok(false) => eprintln!("WARNING: Kubelet process {} exited prematurely", name), _ => (), }; } fn run_test_suite() -> anyhow::Result<()> { println!("Launching integration tests"); let test_process = std::process::Command::new("cargo") .args(&["test", "--test", "integration_tests"]) .stderr(std::process::Stdio::piped()) .stdout(std::process::Stdio::piped()) .spawn()?; println!("Integration tests running"); // TODO: consider streaming progress // TODO: capture pod logs: probably requires cooperation from the test // process let test_process_result = test_process.wait_with_output()?; if test_process_result.status.success() { println!("Integration tests PASSED"); Ok(()) } else { let stdout = String::from_utf8(test_process_result.stdout)?; eprintln!("{}", stdout); let stderr = String::from_utf8(test_process_result.stderr)?; eprintln!("{}", stderr); eprintln!("Integration tests FAILED"); Err(anyhow::anyhow!(stderr)) } } fn capture_kubelet_logs( kubelet_name: &str, kubelet_process: &mut std::process::Child, destination: std::path::PathBuf, ) { let stdout = kubelet_process.stdout.as_mut().unwrap(); let stdout_path = destination.with_extension("stdout.txt"); write_kubelet_log_to_file(kubelet_name, stdout, stdout_path); let stderr = kubelet_process.stderr.as_mut().unwrap(); let stderr_path = destination.with_extension("stderr.txt"); write_kubelet_log_to_file(kubelet_name, stderr, stderr_path); } fn write_kubelet_log_to_file( kubelet_name: &str, log: &mut impl std::io::Read, file_path: std::path::PathBuf, ) { let mut file_result = std::fs::File::create(file_path); match file_result { Ok(ref mut file) => { let write_result = std::io::copy(log, file); match write_result { Ok(_) => (), Err(e) => eprintln!("Can't capture {} output: {}", kubelet_name, e), } } Err(e) => { eprintln!("Can't capture {} output: {}", kubelet_name, e); } } }
31.990893
313
0.579855
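The file above is a krustlet end-to-end test driver; its `ResultSequence` helper is self-contained, so a minimal sketch of its behavior follows (the `partition_demo` function and its inputs are illustrative, not part of the source):

fn partition_demo() {
    // Successes keep their order; failures are collected separately.
    let results: Vec<Result<i32, String>> = vec![Ok(1), Err("boom".to_string()), Ok(2)];
    let (oks, errs) = results.partition_success();
    assert_eq!(oks, vec![1, 2]);
    assert_eq!(errs, vec!["boom".to_string()]);
}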
ffb356819e1e1323d851522317021c5b35672e10
8,918
// AUTOGENERATED FROM index-macintosh.txt, ORIGINAL COMMENT FOLLOWS: // // Any copyright is dedicated to the Public Domain. // https://creativecommons.org/publicdomain/zero/1.0/ // // For details on index index-macintosh.txt see the Encoding Standard // https://encoding.spec.whatwg.org/ // // Identifier: f2c6a4f6406b3e86a50a5dba4d2b7dd48e2e33c0d82aefe764535c934ec11764 // Date: 2014-12-19 static FORWARD_TABLE: &'static [u16] = &[ 196, 197, 199, 201, 209, 214, 220, 225, 224, 226, 228, 227, 229, 231, 233, 232, 234, 235, 237, 236, 238, 239, 241, 243, 242, 244, 246, 245, 250, 249, 251, 252, 8224, 176, 162, 163, 167, 8226, 182, 223, 174, 169, 8482, 180, 168, 8800, 198, 216, 8734, 177, 8804, 8805, 165, 181, 8706, 8721, 8719, 960, 8747, 170, 186, 937, 230, 248, 191, 161, 172, 8730, 402, 8776, 8710, 171, 187, 8230, 160, 192, 195, 213, 338, 339, 8211, 8212, 8220, 8221, 8216, 8217, 247, 9674, 255, 376, 8260, 8364, 8249, 8250, 64257, 64258, 8225, 183, 8218, 8222, 8240, 194, 202, 193, 203, 200, 205, 206, 207, 204, 211, 212, 63743, 210, 218, 219, 217, 305, 710, 732, 175, 728, 729, 730, 184, 733, 731, 711, ]; /// Returns the index code point for pointer `code` in this index. #[inline] pub fn forward(code: u8) -> u16 { FORWARD_TABLE[(code - 0x80) as usize] } static BACKWARD_TABLE_LOWER: &'static [u8] = &[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 202, 193, 162, 163, 0, 180, 0, 164, 172, 169, 187, 199, 194, 0, 168, 248, 161, 177, 0, 0, 171, 181, 166, 225, 252, 0, 188, 200, 0, 0, 0, 192, 203, 231, 229, 204, 128, 129, 174, 130, 233, 131, 230, 232, 237, 234, 235, 236, 0, 132, 241, 238, 239, 205, 133, 0, 175, 244, 242, 243, 134, 0, 0, 167, 136, 135, 137, 139, 138, 140, 190, 141, 143, 142, 144, 145, 147, 146, 148, 149, 0, 150, 152, 151, 153, 155, 154, 214, 191, 157, 156, 158, 159, 0, 0, 216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 245, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 206, 207, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 217, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 196, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 246, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 249, 250, 251, 254, 247, 253, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 185, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 208, 209, 0, 0, 0, 212, 213, 226, 0, 210, 211, 227, 0, 160, 224, 165, 0, 0, 0, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, 220, 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 218, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 219, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 170, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 182, 0, 0, 0, 198, 0, 0, 0, 0, 0, 0, 0, 0, 184, 0, 183, 0, 0, 0, 0, 0, 0, 0, 0, 195, 0, 0, 0, 176, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 186, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 197, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 173, 0, 0, 0, 178, 179, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 215, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 240, 0, 222, 223, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; static BACKWARD_TABLE_UPPER: &'static [u16] = &[ 0, 0, 64, 128, 192, 256, 320, 0, 0, 0, 0, 384, 0, 0, 448, 512, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 576, 640, 704, 0, 768, 0, 0, 0, 832, 896, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 960, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1024, 0, 0, 0, 0, 0, 0, 0, 0, 1088, ]; /// Returns the index pointer for code point `code` in this index. #[inline] pub fn backward(code: u32) -> u8 { let offset = (code >> 6) as usize; let offset = if offset < 1005 {BACKWARD_TABLE_UPPER[offset] as usize} else {0}; BACKWARD_TABLE_LOWER[offset + ((code & 63) as usize)] } #[cfg(feature = "enclave_unit_test")] single_byte_tests!( mod = macintosh );
63.7
83
0.406593
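A small round-trip sketch for the index functions above, with values cross-checked against FORWARD_TABLE (byte 0xCA is NO-BREAK SPACE, U+00A0, in mac-roman); the function name is illustrative:

fn macintosh_roundtrip() {
    assert_eq!(forward(0xCA), 0x00A0);  // pointer 0xCA -> table index 74 -> U+00A0
    assert_eq!(backward(0x00A0), 0xCA); // code point 160 -> pointer byte 0xCA
    assert_eq!(backward(0x2603), 0);    // unmapped code points fall through to 0
}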
b999805b796f70e4deb8529b1d19125c03f61468
1,330
#[macro_use] extern crate rocket;

use std::collections::HashMap;

use rocket::State;
use rocket_contrib::uuid::Uuid;
use rocket_contrib::uuid::extern_uuid;

#[cfg(test)] mod tests;

// A small people mapping in managed state for the sake of this example. In a
// real application this would be a database. Notice that we use the external
// Uuid type here and not the rocket_contrib::uuid::Uuid type. We do this purely
// for demonstrative purposes; in practice, we could use the contrib `Uuid`.
struct People(HashMap<extern_uuid::Uuid, &'static str>);

#[get("/people/<id>")]
fn people(id: Uuid, people: &State<People>) -> Result<String, String> {
    // Because Uuid implements the Deref trait, we use Deref coercion to convert
    // rocket_contrib::uuid::Uuid to uuid::Uuid.
    Ok(people.0.get(&id)
        .map(|person| format!("We found: {}", person))
        .ok_or_else(|| format!("Person not found for UUID: {}", id))?)
}

#[launch]
fn rocket() -> _ {
    let mut map = HashMap::new();
    map.insert("7f205202-7ba1-4c39-b2fc-3e630722bf9f".parse().unwrap(), "Lacy");
    map.insert("4da34121-bc7d-4fc1-aee6-bf8de0795333".parse().unwrap(), "Bob");
    map.insert("ad962969-4e3d-4de7-ac4a-2d86d6d10839".parse().unwrap(), "George");

    rocket::build()
        .manage(People(map))
        .mount("/", routes![people])
}
35.945946
82
0.675188
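A hedged test sketch for the route above using Rocket's local blocking client (`rocket::local::blocking::Client` in Rocket 0.5); the UUID is one of the keys inserted in `rocket()`:

#[test]
fn find_lacy() {
    use rocket::local::blocking::Client;

    let client = Client::tracked(rocket()).expect("valid rocket instance");
    let response = client.get("/people/7f205202-7ba1-4c39-b2fc-3e630722bf9f").dispatch();
    assert_eq!(response.into_string().unwrap(), "We found: Lacy");
}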
7aba6f46527ced81142c3c355c027337c34608fc
7,712
use crate::{ aggregator::{Aggregator, Context}, async_sender::AsyncSender, output_filter::OutputFilter, }; use futures::stream::{FuturesUnordered, StreamExt}; use core::{mem, time::Duration}; use std::sync::{Arc, Mutex, RwLock}; use crossbeam_channel::Receiver; use tokio::{signal, task::JoinHandle, time}; #[derive(Copy, Clone, Debug, PartialEq)] pub(crate) enum Command { Continue, Shutdown, } /// The async aggregator pipeline. /// An example for why generics are horrible. pub struct Pipeline<Item: Send + Sized + 'static, State: Send + 'static = ()> { /// List of OutputFilters to iterate over in the receiver task. filters: Vec<Box<dyn OutputFilter<Item = Item>>>, /// List of aggregator tasks to spawn. aggregators: Vec<Box<dyn Aggregator<Item = Item, PipelineState = State>>>, /// The number of aggregator tasks spawned so far. task_number: Option<usize>, /// The sending part of the item pipeline. /// This will be cloned for each aggregator task spawned. item_sender: AsyncSender<Item>, /// The receiving end of the item pipeline. /// There's only one and it will be taken after spawning the receiver task. item_receiver: Option<Receiver<Item>>, /// Internal flag telling the aggregator loops whether or not to shut down cmd_flag: Arc<RwLock<Command>>, /// Internal flag telling the output filter loop whether or not to shut down output_cmd_flag: Arc<RwLock<Command>>, /// State that gets shared through the context struct /// for the Aggregators state: Arc<Mutex<State>>, handles: Vec<JoinHandle<()>>, output_handle: Option<JoinHandle<()>>, } impl<Item: Send + Sized + 'static, State: Send + 'static> Pipeline<Item, State> { pub fn new(state: State) -> Self { let (sender, receiver) = crossbeam_channel::unbounded(); Self { filters: Vec::new(), aggregators: Vec::new(), task_number: None, item_sender: AsyncSender::new(sender, None), item_receiver: Some(receiver), cmd_flag: Arc::new(RwLock::new(Command::Continue)), output_cmd_flag: Arc::new(RwLock::new(Command::Continue)), state: Arc::new(Mutex::new(state)), handles: Vec::new(), output_handle: None, } } pub fn set_output_filters(mut self, filters: Vec<Box<dyn OutputFilter<Item = Item>>>) -> Self { self.filters = filters; self } pub fn add_output_filter(mut self, filter: impl OutputFilter<Item = Item>) -> Self { self.filters.push(Box::new(filter)); self } pub fn set_aggregators( mut self, aggregators: Vec<Box<dyn Aggregator<Item = Item, PipelineState = State>>>, ) -> Self { self.aggregators = aggregators; self } pub fn add_aggregator( mut self, aggregator: impl Aggregator<Item = Item, PipelineState = State>, ) -> Self { self.aggregators.push(Box::new(aggregator)); self } /// Spawns an async task that will listen for items found by the aggregators /// and apply all result filters in order, until one consumes it. 
/// /// # Example /// ```no_run /// Pipeline::new() /// .add_output_filter(some_output_filter) /// .spawn_output_filters() /// ``` /// /// # Panics /// This panics if it gets called more than one time pub async fn spawn_output_filters(mut self) -> Self { let receiver = self .item_receiver .take() .expect("Attempted to spawn receiver task twice (no receiver left)"); let mut out_filters = mem::take(&mut self.filters); let output_cmd_flag = Arc::clone(&self.output_cmd_flag); let output_handle: JoinHandle<()> = tokio::spawn(async move { debug!("Spawned receiver task!"); let output_cmd_flag = Arc::clone(&output_cmd_flag); 'outer: loop { if let Some(mut item) = receiver.try_recv().ok() { debug!("Found a match!"); 'l: for filter in out_filters.iter_mut() { item = match filter.filter(item).await { Some(i) => i, None => break 'l, } } } else { // if nothing is in the queue and the shutdown flag is set terminate the loop if let Ok(lock) = output_cmd_flag.read() { if *lock == Command::Shutdown { info!("Shutting down output filter loop"); break 'outer; } } trace!("Nothing in the queue, sleeping for about a second..."); time::sleep(Duration::from_millis(100)).await; } } }); self.output_handle = Some(output_handle); self } pub async fn spawn_aggregators(mut self) -> Self { let mut task_number = self.task_number.map(|n| n + 1).unwrap_or(0); for mut aggregator in self.aggregators.drain(..) { let mut ctx = Context::new( self.item_sender.clone(), Arc::clone(&self.state), task_number, Arc::clone(&self.cmd_flag), ); let handle = tokio::spawn(async move { info!("Spawned aggregator task #{}", task_number); let mut next = time::Instant::now(); 'l: loop { if let Ok(lock) = ctx.cmd_flag.read() { if *lock == Command::Shutdown { info!("Shutting down task #{}", ctx.task_num()); break 'l; } } if time::Instant::now() < next { time::sleep(Duration::from_secs(1)).await; continue; } if let Err(why) = aggregator.poll(&mut ctx).await { error!("{:?}", why) } next += aggregator.sleep_duration(); } }); self.handles.push(handle); task_number += 1; } self.task_number = Some(task_number); self } pub async fn shutdown(self) { { let mut lock = self.cmd_flag.write().unwrap(); *lock = Command::Shutdown; info!("Set aggregator command flag to SHUTDOWN"); } info!("Waiting for aggregators to finish..."); let mut handles: FuturesUnordered<JoinHandle<()>> = self.handles.into_iter().collect(); while let Some(r) = handles.next().await { if let Err(e) = r { error!("aggregator threw an error while terminating: {}", e); } } { let mut lock = self.output_cmd_flag.write().unwrap(); *lock = Command::Shutdown; info!("Set output filter command flag to SHUTDOWN"); } if let Some(h) = self.output_handle { info!("Processing leftover aggregated data"); h.await.ok(); } } pub async fn spin(self) { if let Ok(_) = signal::ctrl_c().await { self.shutdown().await; } } pub async fn setup_and_run(self) { self.spawn_output_filters() .await .spawn_aggregators() .await .spin() .await } }
31.477551
99
0.532287
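A hedged wiring sketch for the `Pipeline` above; `MyItem`, `MyAggregator`, and `MyFilter` are hypothetical types implementing the crate's `Aggregator` and `OutputFilter` traits (their definitions live in sibling modules not shown here):

async fn run_pipeline(aggregator: MyAggregator, filter: MyFilter) {
    // Build with unit pipeline state, then spawn the output filter task and
    // aggregator tasks, and block until ctrl-c triggers coordinated shutdown.
    Pipeline::<MyItem, ()>::new(())
        .add_output_filter(filter)
        .add_aggregator(aggregator)
        .setup_and_run()
        .await;
}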
de0776683a60bf6ad6bbd76dbd09dca489d3e292
1,658
use derive_more::Display;
use yew::prelude::*;

use crate::components::Badge;

#[derive(Debug, Display, Clone, Copy, PartialEq, PartialOrd)]
pub enum TitleLevel {
    H1,
    H2,
    H3,
    H4,
}

impl TitleLevel {
    pub fn incremented(&self) -> Option<Self> {
        match self {
            TitleLevel::H1 => Some(TitleLevel::H2),
            TitleLevel::H2 => Some(TitleLevel::H3),
            TitleLevel::H3 => Some(TitleLevel::H4),
            TitleLevel::H4 => None,
        }
    }
}

#[derive(Debug, Clone, PartialEq, Properties)]
pub struct TitleProps {
    #[prop_or_default]
    pub classes: Classes,
    #[prop_or(TitleLevel::H1)]
    pub level: TitleLevel,
    #[prop_or_default]
    pub text: String,
    #[prop_or_default]
    pub meta: Option<String>,
    #[prop_or_default]
    pub id: Option<String>,
    #[prop_or(false)]
    pub raw: bool,
    #[prop_or(false)]
    pub capitalize: bool,
}

#[function_component(Title)]
pub fn title(props: &TitleProps) -> Html {
    let text = if props.raw {
        html! { <code>{ &props.text }</code> }
    } else if props.capitalize {
        html! { <span class="capitalize">{ props.text.to_lowercase() }</span> }
    } else {
        html! { &props.text }
    };

    let meta = match &props.meta {
        Some(meta) => html! {
            <Badge
                classes={classes!("bg-brand-bg-message", "dark:bg-brand-dark-bg-message", "ml-2")}
                text={meta.clone()}
            />
        },
        None => html! {},
    };

    html! {
        <@{props.level.to_string()} class={props.classes.clone()} id={props.id.clone()}>
            { text }{ meta }
        </@>
    }
}
22.405405
124
0.547648
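A hedged usage sketch for the `Title` component above; props left unset fall back to the `#[prop_or...]` defaults declared in `TitleProps`:

fn section_heading() -> Html {
    html! {
        <Title level={TitleLevel::H2} text="Components" capitalize=true />
    }
}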
4893c206195cbbcb96e90e328633c61c8b765d82
640
pub fn hello() -> String {
    "world".to_string()
}

/// Convert a `Vec<&str>` to a `Vec<String>`
pub fn to_vec_string(vstr: &Vec<&str>) -> Vec<String> {
    vstr.iter().map(|s| s.to_string()).collect()
}

/// Convert a `Vec<&str>` to a `Vec<String>` where the strings are upper case
pub fn to_vec_uc_string(vstr: &Vec<&str>) -> Vec<String> {
    vstr.iter().map(|s| s.to_string().to_uppercase()).collect()
}

/// Join a `Vec` of `String` into a `String`
pub fn vec_string_to_string(v: &Vec<String>) -> String {
    let r2 = v.iter().fold(String::from(""), |mut sum, the_str| {
        sum.push_str(the_str);
        sum
    });
    r2
}
26.666667
77
0.6
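A quick check of the helpers above (the `demo` function is illustrative):

fn demo() {
    let v = vec!["a", "b"];
    assert_eq!(to_vec_string(&v), vec!["a".to_string(), "b".to_string()]);
    assert_eq!(to_vec_uc_string(&v), vec!["A".to_string(), "B".to_string()]);
    assert_eq!(vec_string_to_string(&to_vec_string(&v)), "ab".to_string());
    assert_eq!(hello(), "world");
}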
6290058aa098d6dffc920694a418a2c9b66b0c33
4,951
#![allow(unused_imports)]
use super::*;
use wasm_bindgen::prelude::*;
#[cfg(web_sys_unstable_apis)]
#[wasm_bindgen]
extern "C" {
    # [wasm_bindgen (extends = :: js_sys :: Object , js_name = XRRenderStateInit)]
    #[derive(Debug, Clone, PartialEq, Eq)]
    #[doc = "The `XrRenderStateInit` dictionary."]
    #[doc = ""]
    #[doc = "*This API requires the following crate features to be activated: `XrRenderStateInit`*"]
    #[doc = ""]
    #[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"]
    #[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"]
    pub type XrRenderStateInit;
}
#[cfg(web_sys_unstable_apis)]
impl XrRenderStateInit {
    #[doc = "Construct a new `XrRenderStateInit`."]
    #[doc = ""]
    #[doc = "*This API requires the following crate features to be activated: `XrRenderStateInit`*"]
    #[doc = ""]
    #[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"]
    #[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"]
    pub fn new() -> Self {
        #[allow(unused_mut)]
        let mut ret: Self = ::wasm_bindgen::JsCast::unchecked_into(::js_sys::Object::new());
        ret
    }
    #[cfg(web_sys_unstable_apis)]
    #[cfg(feature = "XrWebGlLayer")]
    #[doc = "Change the `baseLayer` field of this object."]
    #[doc = ""]
    #[doc = "*This API requires the following crate features to be activated: `XrRenderStateInit`, `XrWebGlLayer`*"]
    #[doc = ""]
    #[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"]
    #[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"]
    pub fn base_layer(&mut self, val: Option<&XrWebGlLayer>) -> &mut Self {
        use wasm_bindgen::JsValue;
        let r = ::js_sys::Reflect::set(
            self.as_ref(),
            &JsValue::from("baseLayer"),
            &JsValue::from(val),
        );
        debug_assert!(
            r.is_ok(),
            "setting properties should never fail on our dictionary objects"
        );
        let _ = r;
        self
    }
    #[cfg(web_sys_unstable_apis)]
    #[doc = "Change the `depthFar` field of this object."]
    #[doc = ""]
    #[doc = "*This API requires the following crate features to be activated: `XrRenderStateInit`*"]
    #[doc = ""]
    #[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"]
    #[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"]
    pub fn depth_far(&mut self, val: f64) -> &mut Self {
        use wasm_bindgen::JsValue;
        let r = ::js_sys::Reflect::set(
            self.as_ref(),
            &JsValue::from("depthFar"),
            &JsValue::from(val),
        );
        debug_assert!(
            r.is_ok(),
            "setting properties should never fail on our dictionary objects"
        );
        let _ = r;
        self
    }
    #[cfg(web_sys_unstable_apis)]
    #[doc = "Change the `depthNear` field of this object."]
    #[doc = ""]
    #[doc = "*This API requires the following crate features to be activated: `XrRenderStateInit`*"]
    #[doc = ""]
    #[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"]
    #[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"]
    pub fn depth_near(&mut self, val: f64) -> &mut Self {
        use wasm_bindgen::JsValue;
        let r = ::js_sys::Reflect::set(
            self.as_ref(),
            &JsValue::from("depthNear"),
            &JsValue::from(val),
        );
        debug_assert!(
            r.is_ok(),
            "setting properties should never fail on our dictionary objects"
        );
        let _ = r;
        self
    }
    #[cfg(web_sys_unstable_apis)]
    #[doc = "Change the `inlineVerticalFieldOfView` field of this object."]
    #[doc = ""]
    #[doc = "*This API requires the following crate features to be activated: `XrRenderStateInit`*"]
    #[doc = ""]
    #[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"]
    #[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"]
    pub fn inline_vertical_field_of_view(&mut self, val: f64) -> &mut Self {
        use wasm_bindgen::JsValue;
        let r = ::js_sys::Reflect::set(
            self.as_ref(),
            &JsValue::from("inlineVerticalFieldOfView"),
            &JsValue::from(val),
        );
        debug_assert!(
            r.is_ok(),
            "setting properties should never fail on our dictionary objects"
        );
        let _ = r;
        self
    }
}
42.681034
128
0.604322
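A hedged sketch of using the dictionary above; it assumes the `XrRenderStateInit` crate feature and `--cfg=web_sys_unstable_apis` are active, as the generated docs require:

fn make_render_state() -> XrRenderStateInit {
    let mut init = XrRenderStateInit::new();
    // Setters mutate the underlying JS object and return &mut Self for chaining.
    init.depth_near(0.1).depth_far(1000.0);
    init
}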
8a7271c38ce242ef7839e25cbbbd84d4aa3d454d
740
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// run-pass
#![allow(dead_code)]
// pretty-expanded FIXME #23616

enum Foo {
    Bar,
    Baz,
}

fn foo(f: Foo) {
    match f {
        Foo::Bar => {},
        #[cfg(not(asdfa))]
        Foo::Baz => {},
        #[cfg(afsd)]
        Basdfwe => {}
    }
}

pub fn main() {}
23.870968
68
0.631081
4b48d2b1ad4813f673b69a73a595666d074dcfea
2,480
/*
    Copyright 2021 Integritee AG and Supercomputing Systems AG
    Copyright (C) 2017-2019 Baidu, Inc. All Rights Reserved.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

use crate::ocall_bridge::bridge_api::{Bridge, RemoteAttestationBridge};
use log::*;
use sgx_types::{c_int, sgx_status_t};
use std::sync::Arc;

#[no_mangle]
pub extern "C" fn ocall_get_ias_socket(ret_fd: *mut c_int) -> sgx_status_t {
    get_ias_socket(ret_fd, Bridge::get_ra_api()) // inject the RA API (global state)
}

fn get_ias_socket(ret_fd: *mut c_int, ra_api: Arc<dyn RemoteAttestationBridge>) -> sgx_status_t {
    debug!(" Entering ocall_get_ias_socket");

    let socket_result = ra_api.get_ias_socket();

    return match socket_result {
        Ok(s) => {
            unsafe {
                *ret_fd = s;
            }
            sgx_status_t::SGX_SUCCESS
        },
        Err(e) => {
            error!("[-] Failed to get IAS socket: {:?}", e);
            return e.into()
        },
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::ocall_bridge::bridge_api::{MockRemoteAttestationBridge, OCallBridgeError};
    use std::sync::Arc;

    #[test]
    fn get_socket_sets_pointer_result() {
        let expected_socket = 4321i32;

        let mut ra_ocall_api_mock = MockRemoteAttestationBridge::new();
        ra_ocall_api_mock
            .expect_get_ias_socket()
            .times(1)
            .returning(move || Ok(expected_socket));

        let mut ias_sock: i32 = 0;

        let ret_status = get_ias_socket(&mut ias_sock as *mut i32, Arc::new(ra_ocall_api_mock));

        assert_eq!(ret_status, sgx_status_t::SGX_SUCCESS);
        assert_eq!(ias_sock, expected_socket);
    }

    #[test]
    fn given_error_from_ocall_impl_then_return_sgx_error() {
        let mut ra_ocall_api_mock = MockRemoteAttestationBridge::new();
        ra_ocall_api_mock
            .expect_get_ias_socket()
            .times(1)
            .returning(|| Err(OCallBridgeError::GetIasSocket("test error".to_string())));

        let mut ias_sock: i32 = 0;

        let ret_status = get_ias_socket(&mut ias_sock as *mut i32, Arc::new(ra_ocall_api_mock));

        assert_ne!(ret_status, sgx_status_t::SGX_SUCCESS);
        assert_eq!(ias_sock, 0);
    }
}
28.505747
97
0.729839
1a540721514d6dd4836320cd45ce090530d1636c
4,128
// Generated from definition io.k8s.api.apps.v1beta2.DaemonSetUpdateStrategy

/// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct DaemonSetUpdateStrategy {
    /// Rolling update config params. Present only if type = "RollingUpdate".
    pub rolling_update: Option<crate::api::apps::v1beta2::RollingUpdateDaemonSet>,

    /// Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate.
    pub type_: Option<String>,
}

impl<'de> crate::serde::Deserialize<'de> for DaemonSetUpdateStrategy {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
        #[allow(non_camel_case_types)]
        enum Field {
            Key_rolling_update,
            Key_type_,
            Other,
        }

        impl<'de> crate::serde::Deserialize<'de> for Field {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
                struct Visitor;

                impl<'de> crate::serde::de::Visitor<'de> for Visitor {
                    type Value = Field;

                    fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                        f.write_str("field identifier")
                    }

                    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error {
                        Ok(match v {
                            "rollingUpdate" => Field::Key_rolling_update,
                            "type" => Field::Key_type_,
                            _ => Field::Other,
                        })
                    }
                }

                deserializer.deserialize_identifier(Visitor)
            }
        }

        struct Visitor;

        impl<'de> crate::serde::de::Visitor<'de> for Visitor {
            type Value = DaemonSetUpdateStrategy;

            fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                f.write_str("DaemonSetUpdateStrategy")
            }

            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> {
                let mut value_rolling_update: Option<crate::api::apps::v1beta2::RollingUpdateDaemonSet> = None;
                let mut value_type_: Option<String> = None;

                while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? {
                    match key {
                        Field::Key_rolling_update => value_rolling_update = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_type_ => value_type_ = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; },
                    }
                }

                Ok(DaemonSetUpdateStrategy {
                    rolling_update: value_rolling_update,
                    type_: value_type_,
                })
            }
        }

        deserializer.deserialize_struct(
            "DaemonSetUpdateStrategy",
            &[
                "rollingUpdate",
                "type",
            ],
            Visitor,
        )
    }
}

impl crate::serde::Serialize for DaemonSetUpdateStrategy {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer {
        let mut state = serializer.serialize_struct(
            "DaemonSetUpdateStrategy",
            self.rolling_update.as_ref().map_or(0, |_| 1) +
            self.type_.as_ref().map_or(0, |_| 1),
        )?;
        if let Some(value) = &self.rolling_update {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "rollingUpdate", value)?;
        }
        if let Some(value) = &self.type_ {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "type", value)?;
        }
        crate::serde::ser::SerializeStruct::end(state)
    }
}
40.871287
134
0.546512
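A hedged round-trip sketch: the impls above target serde's traits (re-exported as `crate::serde`), so `serde_json` can drive them; note that the `type_` field maps to the JSON key `"type"`:

fn strategy_roundtrip() -> serde_json::Result<()> {
    let strategy = DaemonSetUpdateStrategy {
        rolling_update: None,
        type_: Some("OnDelete".to_string()),
    };
    let json = serde_json::to_string(&strategy)?;
    assert_eq!(json, r#"{"type":"OnDelete"}"#); // rolling_update is skipped when None
    let back: DaemonSetUpdateStrategy = serde_json::from_str(&json)?;
    assert_eq!(back, strategy);
    Ok(())
}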
64b8d12138451d2c86d5b130937e8591f847903a
437
extern crate serde_json; extern crate serde; extern crate wasm_bindgen; #[macro_use] extern crate serde_derive; mod vector2d; mod response; mod poly_box; mod circle; mod polygon; mod wasm; mod response_lite; mod web_bridge; pub use vector2d::Vector2d; pub use response::Response; pub use poly_box::PolyBox; pub use circle::Circle; pub use polygon::Polygon; pub use response_lite::ResponseLite; pub use web_bridge::init_panic_handler;
18.208333
39
0.796339
169a67b8870a90d73888c23634de4f5b81f085f7
7,133
use fuel_types::Bytes32; use core::fmt; use core::ops::Deref; /// Asymmetric secret key #[derive(Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[repr(transparent)] pub struct SecretKey(Bytes32); impl SecretKey { /// Memory length of the type pub const LEN: usize = Bytes32::LEN; /// Add a conversion from arbitrary slices into owned /// /// # Safety /// /// There is no guarantee the provided bytes will fit the field. The field /// security can be checked with [`SecretKey::is_in_field`]. pub unsafe fn from_bytes_unchecked(bytes: [u8; Self::LEN]) -> Self { Self(bytes.into()) } /// Add a conversion from arbitrary slices into owned /// /// # Safety /// /// This function will not panic if the length of the slice is smaller than /// `Self::LEN`. Instead, it will cause undefined behavior and read random /// disowned bytes. /// /// There is no guarantee the provided bytes will fit the field. pub unsafe fn from_slice_unchecked(bytes: &[u8]) -> Self { Self(Bytes32::from_slice_unchecked(bytes)) } /// Copy-free reference cast /// /// There is no guarantee the provided bytes will fit the field. /// /// # Safety /// /// Inputs smaller than `Self::LEN` will cause undefined behavior. pub unsafe fn as_ref_unchecked(bytes: &[u8]) -> &Self { // The interpreter will frequently make references to keys and values using // logically checked slices. // // This function will avoid unnecessary copy to owned slices for the interpreter // access &*(bytes.as_ptr() as *const Self) } } impl Deref for SecretKey { type Target = [u8; SecretKey::LEN]; fn deref(&self) -> &[u8; SecretKey::LEN] { self.0.deref() } } impl AsRef<[u8]> for SecretKey { fn as_ref(&self) -> &[u8] { self.0.as_ref() } } impl From<SecretKey> for [u8; SecretKey::LEN] { fn from(salt: SecretKey) -> [u8; SecretKey::LEN] { salt.0.into() } } impl fmt::LowerHex for SecretKey { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) } } impl fmt::UpperHex for SecretKey { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) } } impl fmt::Debug for SecretKey { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } impl fmt::Display for SecretKey { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } #[cfg(feature = "std")] mod use_std { use super::*; use crate::{Error, PublicKey}; use secp256k1::{Error as Secp256k1Error, SecretKey as Secp256k1SecretKey}; use core::borrow::Borrow; use core::str; #[cfg(feature = "random")] use rand::{ distributions::{Distribution, Standard}, Rng, }; impl SecretKey { /// Create a new random secret #[cfg(feature = "random")] pub fn random<R>(rng: &mut R) -> Self where R: rand::Rng + ?Sized, { // TODO there is no clear API to generate a scalar for secp256k1. This code is // very inefficient and not constant time; it was copied from // https://github.com/rust-bitcoin/rust-secp256k1/blob/ada3f98ab65e6f12cf1550edb0b7ae064ecac153/src/key.rs#L101 // // Need to improve; generate random bytes and truncate to the field. // // We don't call `Secp256k1SecretKey::new` here because the `rand` requirements // are outdated and inconsistent. 
use secp256k1::ffi::{self, CPtr}; let mut secret = Bytes32::zeroed(); loop { rng.fill(secret.as_mut()); // Safety: FFI call let overflow = unsafe { ffi::secp256k1_ec_seckey_verify( ffi::secp256k1_context_no_precomp, secret.as_c_ptr(), ) }; if overflow != 0 { break; } } Self(secret) } /// Check if the provided slice represents a scalar that fits the field. /// /// # Safety /// /// This function extends the unsafety of /// [`SecretKey::as_ref_unchecked`]. pub unsafe fn is_slice_in_field_unchecked(slice: &[u8]) -> bool { use secp256k1::ffi::{self, CPtr}; let secret = Self::as_ref_unchecked(slice); // Safety: FFI call let overflow = ffi::secp256k1_ec_seckey_verify( ffi::secp256k1_context_no_precomp, secret.as_c_ptr(), ); overflow != 0 } /// Check if the secret key representation fits the scalar field. pub fn is_in_field(&self) -> bool { // Safety: struct is guaranteed to reference itself with correct len unsafe { Self::is_slice_in_field_unchecked(self.as_ref()) } } /// Return the curve representation of this secret. /// /// The discrete logarithm property guarantees this is a one-way /// function. pub fn public_key(&self) -> PublicKey { PublicKey::from(self) } } impl TryFrom<Bytes32> for SecretKey { type Error = Error; fn try_from(b: Bytes32) -> Result<Self, Self::Error> { let secret = SecretKey(b); secret .is_in_field() .then(|| secret) .ok_or(Error::InvalidSecretKey) } } impl TryFrom<&[u8]> for SecretKey { type Error = Error; fn try_from(slice: &[u8]) -> Result<Self, Self::Error> { Bytes32::try_from(slice) .map_err(|_| Secp256k1Error::InvalidSecretKey.into()) .and_then(SecretKey::try_from) } } impl str::FromStr for SecretKey { type Err = Error; fn from_str(s: &str) -> Result<Self, Self::Err> { Bytes32::from_str(s) .map_err(|_| Secp256k1Error::InvalidSecretKey.into()) .and_then(SecretKey::try_from) } } impl Borrow<Secp256k1SecretKey> for SecretKey { fn borrow(&self) -> &Secp256k1SecretKey { // Safety: field checked. The memory representation of the secp256k1 key is // `[u8; 32]` unsafe { &*(self.as_ref().as_ptr() as *const Secp256k1SecretKey) } } } #[cfg(feature = "random")] impl rand::Fill for SecretKey { fn try_fill<R: rand::Rng + ?Sized>(&mut self, rng: &mut R) -> Result<(), rand::Error> { *self = Self::random(rng); Ok(()) } } #[cfg(feature = "random")] impl Distribution<SecretKey> for Standard { fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> SecretKey { SecretKey::random(rng) } } }
28.878543
123
0.555587
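A hedged sketch of the `std` + `random` feature path above: draw a key, confirm it fits the field, and derive its public key (`rand::rngs::OsRng` is assumed to be version-compatible with the crate's `rand` dependency):

fn key_demo() {
    let mut rng = rand::rngs::OsRng;
    let secret = SecretKey::random(&mut rng);
    assert!(secret.is_in_field());
    let _public = secret.public_key(); // one-way: discrete log protects the secret
}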
ccb36de790dd9a7616f9ef4437ddda0a9ef82fa4
8,781
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::DbReader; use anyhow::{format_err, Result}; use libra_crypto::{ hash::{CryptoHash, SPARSE_MERKLE_PLACEHOLDER_HASH}, HashValue, }; use libra_state_view::{StateView, StateViewId}; use libra_types::{ access_path::AccessPath, account_address::AccountAddress, account_state::AccountState, proof::SparseMerkleProof, transaction::{Version, PRE_GENESIS_VERSION}, }; use scratchpad::{AccountStatus, SparseMerkleTree}; use std::{ cell::RefCell, collections::{hash_map::Entry, HashMap}, convert::TryInto, sync::Arc, }; /// `VerifiedStateView` is like a snapshot of the global state comprised of state view at two /// levels, persistent storage and memory. pub struct VerifiedStateView<'a> { /// For logging and debugging purpose, identifies what this view is for. id: StateViewId, /// A gateway implementing persistent storage interface, which can be a RPC client or direct /// accessor. reader: Arc<dyn DbReader>, /// The most recent version in persistent storage. latest_persistent_version: Option<Version>, /// The most recent state root hash in persistent storage. latest_persistent_state_root: HashValue, /// The in-momery version of sparse Merkle tree of which the states haven't been committed. speculative_state: &'a SparseMerkleTree, /// The cache of verified account states from `reader` and `speculative_state_view`, /// represented by a hashmap with an account address as key and a pair of an ordered /// account state map and an an optional account state proof as value. When the VM queries an /// `access_path`, this cache will first check whether `reader_cache` is hit. If hit, it /// will return the corresponding value of that `access_path`; otherwise, the account state /// will be loaded into the cache from scratchpad or persistent storage in order as a /// deserialized ordered map and then be returned. If the VM queries this account again, /// the cached data can be read directly without bothering storage layer. The proofs in /// cache are needed by ScratchPad after VM execution to construct an in-memory sparse Merkle /// tree. /// ```text /// +----------------------------+ /// | In-memory SparseMerkleTree <------+ /// +-------------^--------------+ | /// | | /// write sets | /// | cached account state map /// +-------+-------+ proof /// | V M | | /// +-------^-------+ | /// | | /// value of `account_address/path` | /// | | /// +---------------------------+---------------------+-------+ /// | +-------------------------+---------------------+-----+ | /// | | account_to_state_cache, account_to_proof_cache | | /// | +---------------^---------------------------^---------+ | /// | | | | /// | account state blob only account state blob | /// | | proof | /// | | | | /// | +---------------+--------------+ +----------+---------+ | /// | | speculative_state | | reader | | /// | +------------------------------+ +--------------------+ | /// +---------------------------------------------------------+ /// ``` account_to_state_cache: RefCell<HashMap<AccountAddress, AccountState>>, account_to_proof_cache: RefCell<HashMap<HashValue, SparseMerkleProof>>, } impl<'a> VerifiedStateView<'a> { /// Constructs a [`VerifiedStateView`] with persistent state view represented by /// `latest_persistent_state_root` plus a storage reader, and the in-memory speculative state /// on top of it represented by `speculative_state`. 
pub fn new( id: StateViewId, reader: Arc<dyn DbReader>, latest_persistent_version: Option<Version>, latest_persistent_state_root: HashValue, speculative_state: &'a SparseMerkleTree, ) -> Self { // Hack: When there's no transaction in the db but state tree root hash is not the // placeholder hash, it implies that there's pre-genesis state present. let latest_persistent_version = latest_persistent_version.or_else(|| { if latest_persistent_state_root != *SPARSE_MERKLE_PLACEHOLDER_HASH { Some(PRE_GENESIS_VERSION) } else { None } }); Self { id, reader, latest_persistent_version, latest_persistent_state_root, speculative_state, account_to_state_cache: RefCell::new(HashMap::new()), account_to_proof_cache: RefCell::new(HashMap::new()), } } } impl<'a> Into<( HashMap<AccountAddress, AccountState>, HashMap<HashValue, SparseMerkleProof>, )> for VerifiedStateView<'a> { fn into( self, ) -> ( HashMap<AccountAddress, AccountState>, HashMap<HashValue, SparseMerkleProof>, ) { ( self.account_to_state_cache.into_inner(), self.account_to_proof_cache.into_inner(), ) } } impl<'a> StateView for VerifiedStateView<'a> { fn id(&self) -> StateViewId { self.id } fn get(&self, access_path: &AccessPath) -> Result<Option<Vec<u8>>> { let address = access_path.address; let path = &access_path.path; match self.account_to_state_cache.borrow_mut().entry(address) { Entry::Occupied(occupied) => Ok(occupied.get().get(path).cloned()), Entry::Vacant(vacant) => { let address_hash = address.hash(); let account_blob_option = match self.speculative_state.get(address_hash) { AccountStatus::ExistsInScratchPad(blob) => Some(blob), AccountStatus::DoesNotExist => None, // No matter it is in db or unknown, we have to query from db since even the // former case, we don't have the blob data but only its hash. AccountStatus::ExistsInDB | AccountStatus::Unknown => { let (blob, proof) = match self.latest_persistent_version { Some(version) => self .reader .get_account_state_with_proof_by_version(address, version)?, None => (None, SparseMerkleProof::new(None, vec![])), }; proof .verify( self.latest_persistent_state_root, address.hash(), blob.as_ref(), ) .map_err(|err| { format_err!( "Proof is invalid for address {:?} with state root hash {:?}: {}", address, self.latest_persistent_state_root, err ) })?; assert!(self .account_to_proof_cache .borrow_mut() .insert(address_hash, proof) .is_none()); blob } }; Ok(vacant .insert( account_blob_option .as_ref() .map(TryInto::try_into) .transpose()? .unwrap_or_default(), ) .get(path) .cloned()) } } } fn multi_get(&self, _access_paths: &[AccessPath]) -> Result<Vec<Option<Vec<u8>>>> { unimplemented!(); } fn is_genesis(&self) -> bool { self.latest_persistent_version.is_none() } }
43.044118
98
0.480469
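A hedged construction sketch for the view above; `StateViewId::Miscellaneous` is an assumed variant from the libra-state-view crate, and `db`/`smt` stand in for a real storage reader and scratchpad tree:

fn make_view<'a>(
    db: Arc<dyn DbReader>,
    root: HashValue,
    smt: &'a SparseMerkleTree,
) -> VerifiedStateView<'a> {
    // Version 0 is a placeholder; callers would pass the latest persisted version.
    VerifiedStateView::new(StateViewId::Miscellaneous, db, Some(0), root, smt)
}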